diff --git a/charts/karpenter-crd/Chart.yaml b/charts/karpenter-crd/Chart.yaml index b0cd11fd318f..7fa7f0a32d46 100644 --- a/charts/karpenter-crd/Chart.yaml +++ b/charts/karpenter-crd/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: karpenter-crd description: A Helm chart for Karpenter Custom Resource Definitions (CRDs). type: application -version: 0.37.0 -appVersion: 0.37.0 +version: 1.0.0 +appVersion: 1.0.0 keywords: - cluster - node diff --git a/charts/karpenter/Chart.yaml b/charts/karpenter/Chart.yaml index 4ee804fca789..bab15fc3ffde 100644 --- a/charts/karpenter/Chart.yaml +++ b/charts/karpenter/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: karpenter description: A Helm chart for Karpenter, an open-source node provisioning project built for Kubernetes. type: application -version: 0.37.0 -appVersion: 0.37.0 +version: 1.0.0 +appVersion: 1.0.0 keywords: - cluster - node diff --git a/charts/karpenter/README.md b/charts/karpenter/README.md index 68f02bd816e7..694a517d720f 100644 --- a/charts/karpenter/README.md +++ b/charts/karpenter/README.md @@ -2,7 +2,7 @@ A Helm chart for Karpenter, an open-source node provisioning project built for Kubernetes. -![Version: 0.37.0](https://img.shields.io/badge/Version-0.37.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.37.0](https://img.shields.io/badge/AppVersion-0.37.0-informational?style=flat-square) +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) ## Documentation @@ -15,7 +15,7 @@ You can follow the detailed installation instruction in the [documentation](http ```bash helm upgrade --install --namespace karpenter --create-namespace \ karpenter oci://public.ecr.aws/karpenter/karpenter \ - --version 0.37.0 \ + --version 1.0.0 \ --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \ --set settings.clusterName=${CLUSTER_NAME} \ --set settings.interruptionQueue=${CLUSTER_NAME} \ @@ -27,13 +27,13 @@ helm upgrade --install --namespace karpenter --create-namespace \ As the OCI Helm chart is signed by [Cosign](https://github.com/sigstore/cosign) as part of the release process you can verify the chart before installing it by running the following command. ```shell -cosign verify public.ecr.aws/karpenter/karpenter:0.37.0 \ +cosign verify public.ecr.aws/karpenter/karpenter:1.0.0 \ --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ --certificate-identity-regexp='https://github\.com/aws/karpenter-provider-aws/\.github/workflows/release\.yaml@.+' \ --certificate-github-workflow-repository=aws/karpenter-provider-aws \ --certificate-github-workflow-name=Release \ - --certificate-github-workflow-ref=refs/tags/v0.37.0 \ - --annotations version=0.37.0 + --certificate-github-workflow-ref=refs/tags/v1.0.0 \ + --annotations version=1.0.0 ``` ## Values @@ -48,9 +48,9 @@ cosign verify public.ecr.aws/karpenter/karpenter:0.37.0 \ | controller.envFrom | list | `[]` | | | controller.extraVolumeMounts | list | `[]` | Additional volumeMounts for the controller pod. | | controller.healthProbe.port | int | `8081` | The container port to use for http health probe. 
| -| controller.image.digest | string | `"sha256:157f478f5db1fe999f5e2d27badcc742bf51cc470508b3cebe78224d0947674f"` | SHA256 digest of the controller image. | +| controller.image.digest | string | `"sha256:1eb1073b9f4ed804634aabf320e4d6e822bb61c0f5ecfd9c3a88f05f1ca4c5c5"` | SHA256 digest of the controller image. | | controller.image.repository | string | `"public.ecr.aws/karpenter/controller"` | Repository path to the controller image. | -| controller.image.tag | string | `"0.37.0"` | Tag of the controller image. | +| controller.image.tag | string | `"1.0.0"` | Tag of the controller image. | | controller.metrics.port | int | `8080` | The container port to use for metrics. | | controller.resources | object | `{}` | Resources for the controller pod. | | controller.sidecarContainer | list | `[]` | Additional sidecarContainer config | @@ -72,7 +72,9 @@ cosign verify public.ecr.aws/karpenter/karpenter:0.37.0 \ | podDisruptionBudget.name | string | `"karpenter"` | | | podLabels | object | `{}` | Additional labels for the pod. | | podSecurityContext | object | `{"fsGroup":65532}` | SecurityContext for the pod. | -| postInstallHook.image | string | `public.ecr.aws/bitnami/kubectl:1.30` | The image to run the post-install hook. This minimally needs to have `kubectl` installed | +| postInstallHook.image.digest | string | `"sha256:13a2ad1bd37ce42ee2a6f1ab0d30595f42eb7fe4a90d6ec848550524104a1ed6"` | SHA256 digest of the post-install hook image. | +| postInstallHook.image.repository | string | `"public.ecr.aws/bitnami/kubectl"` | Repository path to the post-install hook. This minimally needs to have `kubectl` installed | +| postInstallHook.image.tag | string | `"1.30"` | Tag of the post-install hook image. | | priorityClassName | string | `"system-cluster-critical"` | PriorityClass name for the pod. | | replicas | int | `2` | Number of replicas. | | revisionHistoryLimit | int | `10` | The number of old ReplicaSets to retain to allow rollback. | diff --git a/charts/karpenter/values.yaml b/charts/karpenter/values.yaml index 62f65a77d934..67dce4b0b265 100644 --- a/charts/karpenter/values.yaml +++ b/charts/karpenter/values.yaml @@ -101,9 +101,9 @@ controller: # -- Repository path to the controller image. repository: public.ecr.aws/karpenter/controller # -- Tag of the controller image. - tag: 0.37.0 + tag: 1.0.0 # -- SHA256 digest of the controller image. - digest: sha256:157f478f5db1fe999f5e2d27badcc742bf51cc470508b3cebe78224d0947674f + digest: sha256:1eb1073b9f4ed804634aabf320e4d6e822bb61c0f5ecfd9c3a88f05f1ca4c5c5 # -- Additional environment variables for the controller pod. env: [] # - name: AWS_REGION @@ -137,7 +137,7 @@ controller: healthProbe: # -- The container port to use for http health probe. port: 8081 -postInstallHook: +postInstallHook: image: # -- Repository path to the post-install hook. 
This minimally needs to have `kubectl` installed repository: public.ecr.aws/bitnami/kubectl diff --git a/go.mod b/go.mod index 050b91b64788..41b37882c58e 100644 --- a/go.mod +++ b/go.mod @@ -31,8 +31,8 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd - sigs.k8s.io/controller-runtime v0.18.4 - sigs.k8s.io/karpenter v0.37.1-0.20240812180459-92547d1f9c20 + sigs.k8s.io/controller-runtime v0.18.5 + sigs.k8s.io/karpenter v1.0.1-0.20240815170320-bb7468a3a758 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 210678fda946..5395ecd1a912 100644 --- a/go.sum +++ b/go.sum @@ -757,12 +757,12 @@ knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd/go.mod h1:36cYnaOVHkzmhgybmYX rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= +sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/karpenter v0.37.1-0.20240812180459-92547d1f9c20 h1:HA+J1LbKlvFWUT7w7EDvVmhBeAoqqPeRbqGp8NUnLnw= -sigs.k8s.io/karpenter v0.37.1-0.20240812180459-92547d1f9c20/go.mod h1:3NLmsnHHw8p4VutpjTOPUZyhE3qH6yGTs8O94Lsu8uw= +sigs.k8s.io/karpenter v1.0.1-0.20240815170320-bb7468a3a758 h1:VEibnW+C/lW8QVgGlsZadhhTPXwhkR2CQj828zHu8Ao= +sigs.k8s.io/karpenter v1.0.1-0.20240815170320-bb7468a3a758/go.mod h1:SGH7B5ZSeaCXBnwvj4cSmIPC6TqRq7kPZmQyJRdxC6k= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/docs/compatibilitymatrix_gen/compatibility.yaml b/hack/docs/compatibilitymatrix_gen/compatibility.yaml index 19440233d155..19b0701ff966 100644 --- a/hack/docs/compatibilitymatrix_gen/compatibility.yaml +++ b/hack/docs/compatibilitymatrix_gen/compatibility.yaml @@ -50,4 +50,7 @@ compatibility: maxK8sVersion: 1.29 - appVersion: 0.37.0 minK8sVersion: 1.23 + maxK8sVersion: 1.30 + - appVersion: 1.0.0 + minK8sVersion: 1.25 maxK8sVersion: 1.30 \ No newline at end of file diff --git a/pkg/cloudprovider/cloudprovider.go b/pkg/cloudprovider/cloudprovider.go index 81ee525ea522..acde46ed2276 100644 --- a/pkg/cloudprovider/cloudprovider.go +++ b/pkg/cloudprovider/cloudprovider.go @@ -209,6 +209,10 @@ func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *karpv1.NodeClaim) return c.instanceProvider.Delete(ctx, id) } +func (c *CloudProvider) DisruptionReasons() []karpv1.DisruptionReason { + return nil +} + func (c *CloudProvider) IsDrifted(ctx context.Context, nodeClaim *karpv1.NodeClaim) (cloudprovider.DriftReason, error) { // Not needed when GetInstanceTypes removes nodepool dependency nodePoolName, ok := nodeClaim.Labels[karpv1.NodePoolLabelKey] diff --git a/pkg/fake/cloudprovider.go b/pkg/fake/cloudprovider.go index 
98b05ad876c3..75d9ba516725 100644 --- a/pkg/fake/cloudprovider.go +++ b/pkg/fake/cloudprovider.go @@ -77,6 +77,10 @@ func (c *CloudProvider) Delete(context.Context, *karpv1.NodeClaim) error { return nil } +func (c *CloudProvider) DisruptionReasons() []karpv1.DisruptionReason { + return nil +} + // Name returns the CloudProvider implementation name. func (c *CloudProvider) Name() string { return "fake" diff --git a/test/hack/resource/clean/main.go b/test/hack/resource/clean/main.go index dd8b1aa9ca46..2cb51b27dcf5 100644 --- a/test/hack/resource/clean/main.go +++ b/test/hack/resource/clean/main.go @@ -90,19 +90,20 @@ func main() { if err != nil { resourceLogger.Errorf("%v", err) } + cleaned := []string{} resourceLogger.With("ids", ids, "count", len(ids)).Infof("discovered resourceTypes") if len(ids) > 0 { - cleaned, err := resourceTypes[i].Cleanup(ctx, ids) + cleaned, err = resourceTypes[i].Cleanup(ctx, ids) if err != nil { resourceLogger.Errorf("%v", err) } - // Should only fire metrics if the resource have expired - if lo.FromPtr(clusterName) == "" { - if err = metricsClient.FireMetric(ctx, sweeperCleanedResourcesTableName, fmt.Sprintf("%sDeleted", resourceTypes[i].String()), float64(len(cleaned)), lo.Ternary(resourceTypes[i].Global(), "global", cfg.Region)); err != nil { - resourceLogger.Errorf("%v", err) - } - } resourceLogger.With("ids", cleaned, "count", len(cleaned)).Infof("deleted resourceTypes") } + // Should only fire metrics if the resource have expired + if lo.FromPtr(clusterName) == "" { + if err = metricsClient.FireMetric(ctx, sweeperCleanedResourcesTableName, fmt.Sprintf("%sDeleted", resourceTypes[i].String()), float64(len(cleaned)), lo.Ternary(resourceTypes[i].Global(), "global", cfg.Region)); err != nil { + resourceLogger.Errorf("%v", err) + } + } } } diff --git a/website/content/en/docs/concepts/disruption.md b/website/content/en/docs/concepts/disruption.md index 001de3a242ff..cccb8d297a8c 100644 --- a/website/content/en/docs/concepts/disruption.md +++ b/website/content/en/docs/concepts/disruption.md @@ -1,7 +1,7 @@ --- title: "Disruption" linkTitle: "Disruption" -weight: 4 +weight: 50 description: > Understand different ways Karpenter disrupts nodes --- @@ -13,7 +13,7 @@ The finalizer blocks deletion of the node object while the Termination Controlle ### Disruption Controller -Karpenter automatically discovers disruptable nodes and spins up replacements when needed. Karpenter disrupts nodes by executing one [automated method](#automated-methods) at a time, in order of Expiration, Drift, and then Consolidation. Each method varies slightly, but they all follow the standard disruption process. Karpenter uses [disruption budgets]({{}}) to control the speed of disruption. +Karpenter automatically discovers disruptable nodes and spins up replacements when needed. Karpenter disrupts nodes by executing one [automated method](#automated-methods) at a time, first doing Drift then Consolidation. Each method varies slightly, but they all follow the standard disruption process. Karpenter uses [disruption budgets]({{}}) to control the speed at which these disruptions begin. 1. Identify a list of prioritized candidates for the disruption method. * If there are [pods that cannot be evicted](#pod-eviction) on the node, Karpenter will ignore the node and try disrupting it later. * If there are no disruptable nodes, continue to the next disruption method. 
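For orientation, the `karpenter.sh/v1` NodePool disruption surface that the documentation changes above refer to takes roughly the following shape. This is an illustrative sketch only: the field names are the ones used in the examples later on this page, and the values shown are placeholders rather than defaults.

```yaml
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      # In v1, node lifetime moves under the template spec (previously spec.disruption.expireAfter).
      expireAfter: 720h
  disruption:
    # v1 renames WhenUnderutilized to WhenEmptyOrUnderutilized.
    consolidationPolicy: WhenEmptyOrUnderutilized
    consolidateAfter: 1m
    budgets:
      # With no budgets defined, Karpenter defaults to a single budget of nodes: 10%.
      - nodes: "10%"
```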
@@ -61,11 +61,10 @@ By adding the finalizer, Karpenter improves the default Kubernetes process of no When you run `kubectl delete node` on a node without a finalizer, the node is deleted without triggering the finalization logic. The instance will continue running in EC2, even though there is no longer a node object for it. The kubelet isn’t watching for its own existence, so if a node is deleted, the kubelet doesn’t terminate itself. All the pod objects get deleted by a garbage collection process later, because the pods’ node is gone. {{% /alert %}} -## Automated Methods +## Automated Graceful Methods -Automated methods can be rate limited through [NodePool Disruption Budgets]({{}}) +Automated graceful methods can be rate limited through [NodePool Disruption Budgets]({{}}) -* **Expiration**: Karpenter will mark nodes as expired and disrupt them after they have lived a set number of seconds, based on the NodePool's `spec.disruption.expireAfter` value. You can use node expiry to periodically recycle nodes due to security concerns. * [**Consolidation**]({{}}): Karpenter works to actively reduce cluster cost by identifying when: * Nodes can be removed because the node is empty * Nodes can be removed as their workloads will run on other nodes in the cluster. @@ -74,22 +73,22 @@ Automated methods can be rate limited through [NodePool Disruption Budgets]({{}}): Karpenter will watch for upcoming interruption events that could affect your nodes (health events, spot interruption, etc.) and will taint, drain, and terminate the node(s) ahead of the event to reduce workload disruption. {{% alert title="Defaults" color="secondary" %}} -Disruption is configured through the NodePool's disruption block by the `consolidationPolicy`, `expireAfter` and `consolidateAfter` fields. Karpenter will configure these fields with the following values by default if they are not set: +Disruption is configured through the NodePool's disruption block by the `consolidationPolicy` and `consolidateAfter` fields. `expireAfter` can also be used to control disruption. Karpenter will configure these fields with the following values by default if they are not set: ```yaml spec: disruption: - consolidationPolicy: WhenUnderutilized - expireAfter: 720h + consolidationPolicy: WhenEmptyOrUnderutilized + template: + spec: + expireAfter: 720h ``` {{% /alert %}} -{{% alert title="Warning" color="warning" %}} -`consolidateAfter` **cannot** be set if `consolidationPolicy` is set to `WhenUnderutilized`. See [kubernetes-sigs/karpenter#735](https://github.com/kubernetes-sigs/karpenter/issues/735) for more information. -{{% /alert %}} - ### Consolidation +Consolidation is configured by `consolidationPolicy` and `consolidateAfter`. `consolidationPolicy` determines the pre-conditions for a node to be considered consolidatable and can be either `WhenEmpty` or `WhenEmptyOrUnderutilized`. If a node has no running non-daemon pods, it is considered empty. `consolidateAfter` can be set to indicate how long Karpenter should wait after a pod schedules or is removed from the node before considering the node consolidatable. With `WhenEmptyOrUnderutilized`, Karpenter will consider a node consolidatable when its `consolidateAfter` has been reached, empty or not. + Karpenter has two mechanisms for cluster consolidation: 1. **Deletion** - A node is eligible for deletion if all of its pods can run on free capacity of other nodes in the cluster. 2. **Replace** - A node can be replaced if all of its pods can run on a combination of free capacity of other nodes in the cluster and a single lower price replacement node. @@ -169,6 +168,13 @@ Karpenter will add the `Drifted` status condition on NodeClaims if the NodeClaim 1. The `Drift` feature gate is not enabled but the NodeClaim is drifted, Karpenter will remove the status condition. 2. The NodeClaim isn't drifted, but has the status condition, Karpenter will remove it. +## Automated Forceful Methods + +Automated forceful methods will begin draining nodes as soon as the condition is met. Note that these methods bypass NodePool Disruption Budgets and, unlike the graceful methods mentioned above, do not wait for a pre-spun replacement node to be healthy before the pods reschedule. Use Pod Disruption Budgets and `do-not-disrupt` on your pods to rate-limit the speed at which your applications are disrupted. + +### Expiration +Karpenter will disrupt nodes as soon as they expire, i.e. once they've lived for the duration of the NodePool's `spec.template.spec.expireAfter`. You can use expiration to periodically recycle nodes due to security concerns. + ### Interruption If interruption-handling is enabled, Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These interruption events include: @@ -194,9 +200,20 @@ To enable interruption handling, configure the `--interruption-queue` CLI argume ## Controls -### Disruption Budgets +### TerminationGracePeriod + +This is the duration of time that a node can be draining before it's forcibly deleted. A node begins draining when it's deleted. Pods will be deleted preemptively based on their terminationGracePeriodSeconds before this terminationGracePeriod ends, to give them as much time to clean up as possible. Note that if your pod's terminationGracePeriodSeconds is larger than this terminationGracePeriod, Karpenter may forcibly delete the pod before it has its full terminationGracePeriod to clean up. + +This is especially useful in combination with `nodepool.spec.template.spec.expireAfter` to define an absolute maximum on the lifetime of a node, where a node is deleted at `expireAfter` and finishes draining within the `terminationGracePeriod` thereafter. Pods that block eviction, such as those covered by PDBs or carrying `do-not-disrupt`, will block full draining until the `terminationGracePeriod` is reached. + +For instance, a NodeClaim with `terminationGracePeriod` set to `1h` and an `expireAfter` set to `23h` will begin draining after it's lived for `23h`. Let's say a `do-not-disrupt` pod has `terminationGracePeriodSeconds` set to `300` seconds. If the node hasn't been fully drained after `55m`, Karpenter will delete the pod to allow its full `terminationGracePeriodSeconds` for cleanup. If no pods are blocking draining, Karpenter will clean up the node as soon as the node is fully drained, rather than waiting for the NodeClaim's `terminationGracePeriod` to finish. -You can rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. If undefined, Karpenter will default to one budget with `nodes: 10%`. Budgets will consider nodes that are actively being deleted for any reason, and will only block Karpenter from disrupting nodes voluntarily through expiration, drift, emptiness, and consolidation. +### NodePool Disruption Budgets + +You can rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. If undefined, Karpenter will default to one budget with `nodes: 10%`.
Budgets will consider nodes that are actively being deleted for any reason, and will only block Karpenter from disrupting nodes voluntarily through drift, emptiness, and consolidation. Note that NodePool Disruption Budgets do not prevent Karpenter from cleaning up expired or drifted nodes. + +#### Reasons +Karpenter allows specifying if a budget applies to any of `Drifted`, `Underutilized`, or `Empty`. When a budget has no reasons, it's assumed that it applies to all reasons. When calculating allowed disruptions for a given reason, Karpenter will take the minimum of the budgets that have listed the reason or have left reasons undefined. #### Nodes When calculating if a budget will block nodes from disruption, Karpenter lists the total number of nodes owned by a NodePool, subtracting out the nodes owned by that NodePool that are currently being deleted and nodes that are NotReady. If the number of nodes being deleted by Karpenter or any other processes is greater than the number of allowed disruptions, disruption for this node will not proceed. @@ -204,25 +221,32 @@ When calculating if a budget will block nodes from disruption, Karpenter lists t If the budget is configured with a percentage value, such as `20%`, Karpenter will calculate the number of allowed disruptions as `allowed_disruptions = roundup(total * percentage) - total_deleting - total_notready`. If otherwise defined as a non-percentage value, Karpenter will simply subtract the number of nodes from the total `(total - non_percentage_value) - total_deleting - total_notready`. For multiple budgets in a NodePool, Karpenter will take the minimum value (most restrictive) of each of the budgets. For example, the following NodePool with three budgets defines the following requirements: -- The first budget will only allow 20% of nodes owned by that NodePool to be disrupted. For instance, if there were 19 nodes owned by the NodePool, 4 disruptions would be allowed, rounding up from `19 * .2 = 3.8`. +- The first budget will only allow 20% of nodes owned by that NodePool to be disrupted if it's empty or drifted. For instance, if there were 19 nodes owned by the NodePool, 4 empty or drifted nodes could be disrupted, rounding up from `19 * .2 = 3.8`. - The second budget acts as a ceiling to the previous budget, only allowing 5 disruptions when there are more than 25 nodes. -- The last budget only blocks disruptions during the first 10 minutes of the day, where 0 disruptions are allowed. +- The last budget only blocks disruptions during the first 10 minutes of the day, where 0 disruptions are allowed, only applying to underutilized nodes. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default spec: + template: + spec: + expireAfter: 720h # 30 * 24h = 720h disruption: - consolidationPolicy: WhenUnderutilized - expireAfter: 720h # 30 * 24h = 720h + consolidationPolicy: WhenEmptyOrUnderutilized budgets: - nodes: "20%" + reasons: + - "Empty" + - "Drifted" - nodes: "5" - nodes: "0" schedule: "@daily" duration: 10m + reasons: + - "Underutilized" ``` #### Schedule @@ -294,7 +318,7 @@ metadata: To disable disruption for all nodes launched by a NodePool, you can configure its `.spec.disruption.budgets`. Setting a budget of zero nodes will prevent any of those nodes from being considered for voluntary disruption. 
```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default diff --git a/website/content/en/docs/concepts/nodeclaims.md b/website/content/en/docs/concepts/nodeclaims.md new file mode 100644 index 000000000000..325d98bd5124 --- /dev/null +++ b/website/content/en/docs/concepts/nodeclaims.md @@ -0,0 +1,357 @@ +--- +title: "NodeClaims" +linkTitle: "NodeClaims" +weight: 30 +description: > + Understand NodeClaims +--- + +Karpenter uses NodeClaims to manage the lifecycle of Kubernetes Nodes with the underlying cloud provider. +Karpenter will create and delete NodeClaims in response to the demands of Pods in the cluster. +It does this by evaluating the requirements of pending pods, finding a compatible [NodePool]({{< ref "./nodepools" >}}) and [NodeClass]({{< ref "./nodeclasses" >}}) pair, and creating a NodeClaim which meets both sets of requirements. +Although NodeClaims are immutable resources managed by Karpenter, you can monitor NodeClaims to keep track of the status of your Nodes. + +In addition to tracking the lifecycle of Nodes, NodeClaims serve as requests for capacity. +Karpenter creates NodeClaims in response to provisioning and disruption needs (pre-spin). Whenever Karpenter +creates a NodeClaim, it asks the cloud provider to create the instance (launch), register and link the created node +with the NodeClaim (registration), and wait for the node and its resources to be ready (initialization). + +This page describes how NodeClaims integrate throughout Karpenter and the cloud provider implementation. + +If you want to learn more about the nodes being managed by Karpenter, you can either look directly at the NodeClaim or at the nodes they are associated with: + +* Checking NodeClaims: +If something goes wrong in the process of creating a node, you can look at the NodeClaim +to see where the node creation process might have failed. `kubectl get nodeclaims` will show you the NodeClaims +for the cluster, and its linked node. Using `kubectl describe nodeclaim ` will show the status of a particular NodeClaim. +For example, if the node is NotReady, you might see statuses indicating that the NodeClaim failed to launch, register, or initialize. +There will be logs emitted by the Karpenter controller to indicate this too. + +* Checking nodes: +Use commands such as `kubectl get node` and `kubectl describe node ` to see the actual resources, +labels, and other attributes associated with a particular node. + +## NodeClaim roles in node creation + +NodeClaims provide a critical role in the Karpenter workflow for provisioning capacity, and in node disruptions. + +The following diagram illustrates how NodeClaims interact with other components during Karpenter-driven node creation. + +![nodeclaim-node-creation](/nodeclaims.png) + +{{% alert title="Note" color="primary" %}} +Configure the `KARPENTER_NAMESPACE` environment variable to the namespace where you've installed Karpenter (`kube-system` is the default). Follow along with the Karpenter logs in your cluster and do the following: + +```bash +export KARPENTER_NAMESPACE="kube-system" +kubectl logs -f -n "${KARPENTER_NAMESPACE}" \ + -l app.kubernetes.io/name=karpenter +``` +In a separate terminal, start some pods that would require Karpenter to create nodes to handle those pods. +For example, start up some inflate pods as described in [Scale up deployment]({{< ref "../getting-started/getting-started-with-karpenter/#6-scale-up-deployment" >}}). 
+{{% /alert %}} + +As illustrated in the previous diagram, Karpenter interacts with NodeClaims and related components when creating a node: + +1. Watches for pods and monitors NodePools and NodeClasses: + * Checks the pod scheduling constraints and resource requests. + * Cross-references the requirements with the existing NodePools and NodeClasses, (e.g. zones, arch, os) + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.114Z", + "message": "found provisionable pod(s)", + "commit": "490ef94", + "Pods": "default/inflate-66fb68585c-xvs86, default/inflate-66fb68585c-hpcdz, default/inflate-66fb68585c-8xztf,01234567adb205c7e default/inflate-66fb68585c-t29d8, default/inflate-66fb68585c-nxflz", + "duration": "100.761702ms" + } + ``` + +2. Computes the shape and size of a NodeClaim (or NodeClaims) to create in the cluster to fit the set of pods from step 1. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.114Z", + "message": "computed new nodeclaim(s) to fit pod(s)", + "controller": "provisioner", + "nodeclaims": 1, + "pods": 5 + } + ``` + +3. Creates the NodeClaim object in the cluster. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.128Z", + "message": "created nodeclaim", + "controller": "provisioner", + "NodePool": { + "name":"default" + }, + "NodeClaim": { + "name":"default-sfpsl" + }, + "requests": { + "cpu":"5150m", + "pods":"8" + }, + "instance-types": "c3.2xlarge, c4.2xlarge, c4.4xlarge, c5.2xlarge, c5.4xlarge and 55 other(s)" + } + ``` + +4. Finds the new NodeClaim and translates it into an API call to create a cloud provider instance, logging + the response of the API call. + + If the API response is an unrecoverable error, such as an Insufficient Capacity Error, Karpenter will delete the NodeClaim, mark that instance type as temporarily unavailable, and create another NodeClaim if necessary. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:19.028Z", + "message": "launched nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "instance-type": "c3.2xlarge", + "zone": "us-west-2b", + "capacity-type": "spot", + "allocatable": { + "cpu": "7910m", + "ephemeral-storage": "17Gi", + "memory": "13215Mi", + "pods": "58" + } + } + ``` + +5. Karpenter watches for the instance to register itself with the cluster as a node, and updates the node's + labels, annotations, taints, owner refs, and finalizer to match what was defined in the NodePool and NodeClaim. Once this step is + completed, Karpenter will remove the `karpenter.sh/unregistered` taint from the Node. + + If this fails to succeed within 15 minutes, Karpenter will remove the NodeClaim from the cluster and delete + the underlying instance, creating another NodeClaim if necessary. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:26:19.028Z", + "message": "registered nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "Node": { + "name": "ip-xxx-xxx-xx-xxx.us-west-2.compute.internal" + } + } + ``` + +6. Karpenter continues to watch the node, waiting until the node becomes ready, has all its startup taints removed, + and has all requested resources registered on the node. 
+ + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:52.642Z", + "message": "initialized nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "Node": { + "name": "ip-xxx-xxx-xx-xxx.us-west-2.compute.internal" + }, + "allocatable": { + "cpu": "7910m", + "ephemeral-storage": "18242267924", + "hugepages-2Mi": "0", + "memory": "14320468Ki", + "pods": "58" + } + } + ``` + +## NodeClaim example +The following is an example of a NodeClaim. Keep in mind that you cannot modify a NodeClaim. +To see the contents of a NodeClaim, get the name of your NodeClaim, then run `kubectl describe` to see its contents: + +``` +kubectl get nodeclaim +NAME TYPE ZONE NODE READY AGE +default-m6pzn c7i-flex.2xlarge us-west-1a ip-xxx-xxx-xx-xxx.us-west-1.compute.internal True 7m50s + +kubectl describe nodeclaim default-m6pzn +``` +Starting at the bottom of this example, here are some highlights of what the NodeClaim contains: + +* The Node Name (ip-xxx-xxx-xx-xxx.us-west-1.compute.internal) and Provider ID (aws:///us-west-1a/i-xxxxxxxxxxxxxxxxx) identify the instance that is fulfilling this NodeClaim. +* Image ID (ami-0ccbbed159cce4e37) represents the operating system image running on the node. +* Status shows the resources that are available on the node (CPU, memory, and so on) as well as the conditions associated with the node. The conditions show the status of the node, including whether the node is launched, registered, and initialized. This is particularly useful if Pods are not deploying to the node and you want to determine the cause. +* Spec contains the metadata required for Karpenter to launch and manage an instance. This includes any scheduling requirements, resource requirements, the NodeClass reference, taints, and immutable disruption fields (expireAfter and terminationGracePeriod). +* Additional information includes annotations and labels which should be synced to the Node, creation metadata, the termination finalizer, and the owner reference. 
+ +``` +Name: default-x9wxq +Namespace: +Labels: karpenter.k8s.aws/instance-category=c + karpenter.k8s.aws/instance-cpu=8 + karpenter.k8s.aws/instance-cpu-manufacturer=amd + karpenter.k8s.aws/instance-ebs-bandwidth=3170 + karpenter.k8s.aws/instance-encryption-in-transit-supported=true + karpenter.k8s.aws/instance-family=c5a + karpenter.k8s.aws/instance-generation=5 + karpenter.k8s.aws/instance-hypervisor=nitro + karpenter.k8s.aws/instance-memory=16384 + karpenter.k8s.aws/instance-network-bandwidth=2500 + karpenter.k8s.aws/instance-size=2xlarge + karpenter.sh/capacity-type=spot + karpenter.sh/nodepool=default + kubernetes.io/arch=amd64 + kubernetes.io/os=linux + node.kubernetes.io/instance-type=c5a.2xlarge + topology.k8s.aws/zone-id=usw2-az3 + topology.kubernetes.io/region=us-west-2 + topology.kubernetes.io/zone=us-west-2c +Annotations: compatibility.karpenter.k8s.aws/cluster-name-tagged: true + compatibility.karpenter.k8s.aws/kubelet-drift-hash: 15379597991425564585 + karpenter.k8s.aws/ec2nodeclass-hash: 5763643673275251833 + karpenter.k8s.aws/ec2nodeclass-hash-version: v3 + karpenter.k8s.aws/tagged: true + karpenter.sh/nodepool-hash: 377058807571762610 + karpenter.sh/nodepool-hash-version: v3 +API Version: karpenter.sh/v1 +Kind: NodeClaim +Metadata: + Creation Timestamp: 2024-08-07T05:37:30Z + Finalizers: + karpenter.sh/termination + Generate Name: default- + Generation: 1 + Owner References: + API Version: karpenter.sh/v1 + Block Owner Deletion: true + Kind: NodePool + Name: default + UID: 6b9c6781-ac05-4a4c-ad6a-7551a07b2ce7 + Resource Version: 19600526 + UID: 98a2ba32-232d-45c4-b7c0-b183cfb13d93 +Spec: + Expire After: 720h0m0s + Node Class Ref: + Group: + Kind: EC2NodeClass + Name: default + Requirements: + Key: kubernetes.io/arch + Operator: In + Values: + amd64 + Key: kubernetes.io/os + Operator: In + Values: + linux + Key: karpenter.sh/capacity-type + Operator: In + Values: + spot + Key: karpenter.k8s.aws/instance-category + Operator: In + Values: + c + m + r + Key: karpenter.k8s.aws/instance-generation + Operator: Gt + Values: + 2 + Key: karpenter.sh/nodepool + Operator: In + Values: + default + Key: node.kubernetes.io/instance-type + Operator: In + Values: + c3.xlarge + c4.xlarge + c5.2xlarge + c5.xlarge + c5a.xlarge + c5ad.2xlarge + c5ad.xlarge + c5d.2xlarge + Resources: + Requests: + Cpu: 3150m + Pods: 6 + Startup Taints: + Effect: NoSchedule + Key: app.dev/example-startup + Taints: + Effect: NoSchedule + Key: app.dev/example + Termination Grace Period: 1h0m0s +Status: + Allocatable: + Cpu: 7910m + Ephemeral - Storage: 17Gi + Memory: 14162Mi + Pods: 58 + vpc.amazonaws.com/pod-eni: 38 + Capacity: + Cpu: 8 + Ephemeral - Storage: 20Gi + Memory: 15155Mi + Pods: 58 + vpc.amazonaws.com/pod-eni: 38 + Conditions: + Last Transition Time: 2024-08-07T05:38:08Z + Message: + Reason: Consolidatable + Status: True + Type: Consolidatable + Last Transition Time: 2024-08-07T05:38:07Z + Message: + Reason: Initialized + Status: True + Type: Initialized + Last Transition Time: 2024-08-07T05:37:33Z + Message: + Reason: Launched + Status: True + Type: Launched + Last Transition Time: 2024-08-07T05:38:07Z + Message: + Reason: Ready + Status: True + Type: Ready + Last Transition Time: 2024-08-07T05:37:55Z + Message: + Reason: Registered + Status: True + Type: Registered + Image ID: ami-08946d4d49fc3f27b + Node Name: ip-xxx-xxx-xxx-xxx.us-west-2.compute.internal + Provider ID: aws:///us-west-2c/i-01234567890123 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Launched 70s 
karpenter Status condition transitioned, Type: Launched, Status: Unknown -> True, Reason: Launched + Normal DisruptionBlocked 70s karpenter Cannot disrupt NodeClaim: state node doesn't contain both a node and a nodeclaim + Normal Registered 48s karpenter Status condition transitioned, Type: Registered, Status: Unknown -> True, Reason: Registered + Normal Initialized 36s karpenter Status condition transitioned, Type: Initialized, Status: Unknown -> True, Reason: Initialized + Normal Ready 36s karpenter Status condition transitioned, Type: Ready, Status: Unknown -> True, Reason: Ready +``` diff --git a/website/content/en/docs/concepts/nodeclasses.md b/website/content/en/docs/concepts/nodeclasses.md index d9726cea5e44..4749988e69e9 100644 --- a/website/content/en/docs/concepts/nodeclasses.md +++ b/website/content/en/docs/concepts/nodeclasses.md @@ -1,4 +1,4 @@ - --- +--- title: "NodeClasses" linkTitle: "NodeClasses" weight: 2 @@ -11,7 +11,7 @@ Each NodePool must reference an EC2NodeClass using `spec.template.spec.nodeClass Multiple NodePools may point to the same EC2NodeClass. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -19,15 +19,43 @@ spec: template: spec: nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + group: karpenter.k8s.aws kind: EC2NodeClass name: default --- -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: default spec: + kubelet: + podsPerCore: 2 + maxPods: 20 + systemReserved: + cpu: 100m + memory: 100Mi + ephemeral-storage: 1Gi + kubeReserved: + cpu: 200m + memory: 100Mi + ephemeral-storage: 3Gi + evictionHard: + memory.available: 5% + nodefs.available: 10% + nodefs.inodesFree: 10% + evictionSoft: + memory.available: 500Mi + nodefs.available: 15% + nodefs.inodesFree: 15% + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + evictionMaxPodGracePeriod: 60 + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + cpuCFSQuota: true + clusterDNS: ["10.0.1.100"] # Required, resolves a default ami and userdata amiFamily: AL2 @@ -66,25 +94,20 @@ spec: # Must specify one of "role" or "instanceProfile" for Karpenter to launch nodes instanceProfile: "KarpenterNodeInstanceProfile-${CLUSTER_NAME}" - # Optional, discovers amis to override the amiFamily's default amis # Each term in the array of amiSelectorTerms is ORed together # Within a single term, all conditions are ANDed amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 - - # Optional, use instance-store volumes for node ephemeral-storage - instanceStorePolicy: RAID0 - - # Optional, overrides autogenerated userdata with a merge semantic - userData: | - echo "Hello world" + # Select EKS optimized AL2023 AMIs with version `v20240703`. This term is mutually + # exclusive and can't be specified with other terms. 
+ # - alias: al2023@v20240703 # Optional, propagates tags to underlying EC2 resources tags: @@ -95,7 +118,7 @@ spec: metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled - httpPutResponseHopLimit: 2 + httpPutResponseHopLimit: 1 # This is changed to disable IMDS access from containers not on the host network httpTokens: required # Optional, configures storage devices for the instance @@ -111,6 +134,13 @@ spec: throughput: 125 snapshotID: snap-0123456789 + # Optional, use instance-store volumes for node ephemeral-storage + instanceStorePolicy: RAID0 + + # Optional, overrides autogenerated userdata with a merge semantic + userData: | + echo "Hello world" + # Optional, configures detailed monitoring for the instance detailedMonitoring: true @@ -159,12 +189,244 @@ status: # Generated instance profile name from "role" instanceProfile: "${CLUSTER_NAME}-0123456778901234567789" + conditions: + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: InstanceProfileReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: SubnetsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: SecurityGroupsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: AMIsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: Ready +``` +Refer to the [NodePool docs]({{}}) for settings applicable to all providers. To explore various `EC2NodeClass` configurations, refer to the examples provided [in the Karpenter Github repository](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1/). + + +## spec.kubelet + +Karpenter provides the ability to specify a few additional Kubelet arguments. +These are all optional and provide support for additional customization and use cases. +Adjust these only if you know you need to do so. +For more details on kubelet settings, see the [KubeletConfiguration reference](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/). +The implemented fields are a subset of the full list of upstream kubelet configuration arguments. + +```yaml +kubelet: + podsPerCore: 2 + maxPods: 20 + systemReserved: + cpu: 100m + memory: 100Mi + ephemeral-storage: 1Gi + kubeReserved: + cpu: 200m + memory: 100Mi + ephemeral-storage: 3Gi + evictionHard: + memory.available: 5% + nodefs.available: 10% + nodefs.inodesFree: 10% + evictionSoft: + memory.available: 500Mi + nodefs.available: 15% + nodefs.inodesFree: 15% + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + evictionMaxPodGracePeriod: 60 + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + cpuCFSQuota: true + clusterDNS: ["10.0.1.100"] +``` + +{{% alert title="Note" color="primary" %}} +If you need to specify a field that isn't present in `spec.kubelet`, you can set it via custom [UserData]({{< ref "#specuserdata" >}}). +For example, if you wanted to configure `maxPods` and `registryPullQPS` you would set the former through `spec.kubelet` and the latter through UserData. 
+The following example achieves this with AL2023: + +```yaml +apiVersion: karpenter.k8s.aws/v1 +kind: EC2NodeClass +spec: + amiSelectorTerms: + - alias: al2023@v20240807 + kubelet: + maxPods: 42 + userData: | + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + # Configured through UserData since unavailable in `spec.kubelet` + registryPullQPS: 10 +``` + +Note that when using the `Custom` AMIFamily, you will need to specify fields **both** in `spec.kubelet` and `spec.userData`. +{{% /alert %}} + +{{% alert title="Warning" color="warning" %}} +The Bottlerocket AMIFamily does not support the following fields: + +* `evictionSoft` +* `evictionSoftGracePeriod` +* `evictionMaxPodGracePeriod` +* `cpuCFSQuota` + +If any of these fields are specified on a Bottlerocket EC2NodeClass, they will be omitted from generated UserData and ignored for scheduling purposes. +Support for these fields can be tracked via GitHub issue [#3722](https://github.com/aws/karpenter-provider-aws/issues/3722). +{{% /alert %}} + +#### Pods Per Core + +An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance. + +The value generated from `podsPerCore` cannot exceed `maxPods`, meaning that if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling. + +{{% alert title="Note" color="primary" %}} +`maxPods` may not be set in the `kubelet` of an EC2NodeClass, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the EC2NodeClass will not cause unexpected behavior by exceeding the `maxPods` value. +{{% /alert %}} + +#### Max Pods + +For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup. + +{{% alert title="Note" color="primary" %}} +When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result. +{{% /alert %}} + +{{% alert title="Windows Support Notice" color="warning" %}} +Presently, Windows worker nodes do not support using more than one ENI. +As a consequence, the number of IP addresses, and subsequently, the number of pods that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI. +Currently, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit. +{{% /alert %}} + +### Reserved Resources + +Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf. These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.kubelet.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.kubelet.kubeReserved` values. + +{{% alert title="Note" color="primary" %}} +Karpenter considers these reserved resources when computing the allocatable ephemeral storage on a given instance type. +If `kubeReserved` is not specified, Karpenter will compute the default reserved [CPU](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L251) and [memory](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L235) resources for the purpose of ephemeral storage computation. +These defaults are based on the defaults of Karpenter's supported AMI families, which are not the same as the kubelet defaults. +You should be aware of the CPU and memory default calculation when using Custom AMI Families. If they don't align, there may be a difference in Karpenter's computed allocatable ephemeral storage and the actual ephemeral storage available on the node. +{{% /alert %}} + +### Eviction Thresholds + +The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner. + +Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully. They do so by sending a termination signal to pods that are planning to be evicted and allowing those pods to terminate up to their grace period. + +Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.kubelet.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.kubelet.evictionSoft` field. `evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values.
+ +```yaml +kubelet: + evictionHard: + memory.available: 500Mi + nodefs.available: 10% + nodefs.inodesFree: 10% + imagefs.available: 5% + imagefs.inodesFree: 5% + pid.available: 7% + evictionSoft: + memory.available: 1Gi + nodefs.available: 15% + nodefs.inodesFree: 15% + imagefs.available: 10% + imagefs.inodesFree: 10% + pid.available: 10% +``` + +#### Supported Eviction Signals + +| Eviction Signal | Description | +|--------------------|---------------------------------------------------------------------------------| +| memory.available | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet | +| nodefs.available | nodefs.available := node.stats.fs.available | +| nodefs.inodesFree | nodefs.inodesFree := node.stats.fs.inodesFree | +| imagefs.available | imagefs.available := node.stats.runtime.imagefs.available | +| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree | +| pid.available | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc | + +For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs. + +#### Soft Eviction Grace Periods + +Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and a `evictionSoftGracePeriod[memory.available]` of `1m30`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods. + +Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace-owner had specified a pod `terminationGracePeriodInSeconds` on pods in their namespace, the minimum of `evictionPodGracePeriod` and `terminationGracePeriodInSeconds` would be used. + +```yaml +kubelet: + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + imagefs.available: 1m30s + imagefs.inodesFree: 2m + pid.available: 2m + evictionMaxPodGracePeriod: 60 ``` -Refer to the [NodePool docs]({{}}) for settings applicable to all providers. To explore various `EC2NodeClass` configurations, refer to the examples provided [in the Karpenter Github repository](https://github.com/aws/karpenter/blob/v0.37.0/examples/v1beta1/). + +### Pod Density + +By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for a more detailed information on these instance types' limits. + +{{% alert title="Note" color="primary" %}} +By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. 
VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments will be used for the node and cannot share the allocated IPs on the interface to pods. Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pods interfaces through a global [setting](./settings.md#configmap) within the karpenter-global-settings configmap: `aws.reservedENIs`. In the common case, `aws.reservedENIs` should be set to `"1"` if using Custom Networking. +{{% /alert %}} + +{{% alert title="Windows Support Notice" color="warning" %}} +It's currently not possible to specify custom networking with Windows nodes. +{{% /alert %}} ## spec.amiFamily -AMIFamily is a required field, dictating both the default bootstrapping logic for nodes provisioned through this `EC2NodeClass` but also selecting a group of recommended, latest AMIs by default. Currently, Karpenter supports `amiFamily` values `AL2`, `AL2023`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022` and `Custom`. GPUs are only supported by default with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify custom [`amiSelectorTerms`]({{}}). Default bootstrapping logic is shown below for each of the supported families. +AMIFamily dictates the default bootstrapping logic for nodes provisioned through this `EC2NodeClass`. +An `amiFamily` is only required if you don't specify a `spec.amiSelectorTerms.alias` object. +For example, if you specify `alias: al2023@v20240807`, the `amiFamily` is implicitly `AL2023`. + +AMIFamily does not impact which AMI is discovered, only the UserData generation and default BlockDeviceMappings. To automatically discover EKS optimized AMIs, use the new [`alias` field in amiSelectorTerms]({{< ref "#specamiselectorterms" >}}). + +{{% alert title="Ubuntu Support Dropped at v1" color="warning" %}} + +Support for the Ubuntu AMIFamily has been dropped at Karpenter `v1.0.0`. +This means Karpenter no longer supports automatic AMI discovery and UserData generation for Ubuntu. +To continue using Ubuntu AMIs, you will need to select Ubuntu AMIs using `amiSelectorTerms`. + +Additionally, you will need to either maintain UserData yourself using the `Custom` AMIFamily, or you can use the `AL2` AMIFamily and custom `blockDeviceMappings` (as shown below). +The `AL2` family has an identical UserData format, but this compatibility isn't guaranteed long term. +Changes to AL2's or Ubuntu's UserData format could result in incompatibility, at which point the `Custom` AMIFamily must be used. 
+ +**Ubuntu NodeClass Example:** +```yaml +apiVersion: karpenter.k8s.aws/v1 +kind: EC2NodeClass +spec: + amiFamily: AL2 + amiSelectorTerms: + - id: ami-placeholder + blockDeviceMappings: + - deviceName: '/dev/sda1' + rootVolume: true + ebs: + encrypted: true + volumeType: gp3 + volumeSize: 20Gi +``` + +{{% /alert %}} + ### AL2 @@ -228,24 +490,6 @@ max-pods = 110 'karpenter.sh/nodepool' = 'test' ``` -### Ubuntu - -```bash -MIME-Version: 1.0 -Content-Type: multipart/mixed; boundary="//" - ---// -Content-Type: text/x-shellscript; charset="us-ascii" - -#!/bin/bash -xe -exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 -/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \ ---dns-cluster-ip '10.100.0.10' \ ---use-max-pods false \ ---kubelet-extra-args '--node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test" --max-pods=110' ---//-- -``` - ### Windows2019 ```powershell @@ -264,13 +508,9 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 ``` -{{% alert title="Note" color="primary" %}} -Karpenter will automatically query for the appropriate [EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) via AWS Systems Manager (SSM). In the case of the `Custom` AMIFamily, no default AMIs are defined. As a result, `amiSelectorTerms` must be specified to inform Karpenter on which custom AMIs are to be used. -{{% /alert %}} - ### Custom -The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families. +The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families. For this AMIFamily, kubelet must add the taint `karpenter.sh/unregistered:NoExecute` via the `--register-with-taints` flag ([flags](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)) or the KubeletConfiguration spec ([options](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/#kubelet-config-k8s-io-v1-CredentialProviderConfig) and [docs](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)). Karpenter will fail to register nodes that do not have this taint. ## spec.subnetSelectorTerms @@ -427,29 +667,72 @@ spec: - id: "sg-06e0cf9c198874591" ``` -## spec.amiSelectorTerms +## spec.role -AMI Selector Terms are used to configure custom AMIs for Karpenter to use, where the AMIs are discovered through ids, owners, name, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). **When you specify `amiSelectorTerms`, you fully override the default AMIs that are selected on by your EC2NodeClass [`amiFamily`]({{< ref "#specamifamily" >}}).** +`Role` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If using the [Karpenter Getting Started Guide]({{}}) to deploy Karpenter, you can use the `KarpenterNodeRole-$CLUSTER_NAME` role provisioned by that process. + +```yaml +spec: + role: "KarpenterNodeRole-$CLUSTER_NAME" +``` + +## spec.instanceProfile + +`InstanceProfile` is an optional field and tells Karpenter which IAM identity nodes should assume. 
You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If you use the `instanceProfile` field instead of `role`, Karpenter will not manage the InstanceProfile on your behalf; instead, it expects that you have pre-provisioned an IAM instance profile and assigned it a role. + +You can provision and assign a role to an IAM instance profile using [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) or by using the [`aws iam create-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-instance-profile.html) and [`aws iam add-role-to-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/add-role-to-instance-profile.html) commands in the CLI. {{% alert title="Note" color="primary" %}} -[`amiFamily`]({{< ref "#specamifamily" >}}) determines the bootstrapping mode, while `amiSelectorTerms` specifies specific AMIs to be used. Therefore, you need to ensure consistency between [`amiFamily`]({{< ref "#specamifamily" >}}) and `amiSelectorTerms` to avoid conflicts during bootstrapping. + +For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) that do not have access to the public internet, using `spec.instanceProfile` is required. `spec.role` cannot be used since Karpenter needs to access IAM endpoints to manage a generated instance profile. IAM [doesn't support private endpoints](https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html) to enable accessing the service without going to the public internet. + {{% /alert %}} -This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different AMIs that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled. +## spec.amiSelectorTerms + +AMI Selector Terms are __required__ and are used to configure AMIs for Karpenter to use. AMIs are discovered through alias, id, owner, name, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). + +This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. +Effectively, all requirements within a single term are ANDed together. +It's possible that you may want to select on two different AMIs that have unrelated requirements. +In this case, you can specify multiple terms which will be ORed together to form your selection logic. +The example below shows how this selection logic is fulfilled. ```yaml amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 + # Select EKS optimized AL2023 AMIs with version `v20240807`. This term is mutually + # exclusive and can't be specified with other terms. 
+ # - alias: al2023@v20240807 +``` + +An `alias` term can be used to select EKS-optimized AMIs. An `alias` is formatted as `family@version`. Family can be one of the following values: + +* `al2` +* `al2023` +* `bottlerocket` +* `windows2019` +* `windows2022` + +The version string can be set to `latest`, or pinned to a specific AMI using the format of that AMI's GitHub release tags. +For example, AL2 and AL2023 use dates for their release, so they can be pinned as follows: +```yaml +alias: al2023@v20240703 ``` +Bottlerocket uses a semantic version for their releases. You can pin bottlerocket as follows: +```yaml +alias: bottlerocket@v1.20.4 +``` +The Windows family does not support pinning, so only `latest` is supported. -This field is optional, and Karpenter will use the latest EKS-optimized AMIs for the AMIFamily if no amiSelectorTerms are specified. To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To ensure that AMIs are owned by the expected owner, use the `owner` field - you can use a combination of account aliases (e.g. `self` `amazon`, `your-aws-account-name`) and account IDs. +To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To select AMIs that are not owned by `amazon` or the account that Karpenter is running in, use the `owner` field - you can use a combination of account aliases (e.g. `self` `amazon`, `your-aws-account-name`) and account IDs. If owner is not set for `name`, it defaults to `self,amazon`, preventing Karpenter from inadvertently selecting an AMI that is owned by a different account. Tags don't require an owner as tags can only be discovered by the user who created them. @@ -461,14 +744,21 @@ AMIs may be specified by any AWS tag, including `Name`. Selecting by tag or by n If `amiSelectorTerms` match more than one AMI, Karpenter will automatically determine which AMI best fits the workloads on the launched worker node under the following constraints: * When launching nodes, Karpenter automatically determines which architecture a custom AMI is compatible with and will use images that match an instanceType's requirements. - * Note that Karpenter **cannot** detect any requirement other than architecture. If you need to specify different AMIs for different kind of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`. + * Unless using an alias, Karpenter **cannot** detect requirements other than architecture. If you need to specify different AMIs for different kind of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`. * If multiple AMIs are found that can be used, Karpenter will choose the latest one. * If no AMIs are found that can be used, then no nodes will be provisioned. {{% /alert %}} #### Examples +Select by AMI family and version: +```yaml + amiSelectorTerms: + - alias: al2023@v20240807 +``` + Select all with a specified tag: + ```yaml amiSelectorTerms: - tags: @@ -519,27 +809,6 @@ Specify using ids: - id: "ami-456" ``` -## spec.role - -`Role` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If using the [Karpenter Getting Started Guide]({{}}) to deploy Karpenter, you can use the `KarpenterNodeRole-$CLUSTER_NAME` role provisioned by that process. 
- -```yaml -spec: - role: "KarpenterNodeRole-$CLUSTER_NAME" -``` - -## spec.instanceProfile - -`InstanceProfile` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If you use the `instanceProfile` field instead of `role`, Karpenter will not manage the InstanceProfile on your behalf; instead, it expects that you have pre-provisioned an IAM instance profile and assigned it a role. - -You can provision and assign a role to an IAM instance profile using [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) or by using the [`aws iam create-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-instance-profile.html) and [`aws iam add-role-to-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/add-role-to-instance-profile.html) commands in the CLI. - -{{% alert title="Note" color="primary" %}} - -For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) that do not have access to the public internet, using `spec.instanceProfile` is required. `spec.role` cannot be used since Karpenter needs to access IAM endpoints to manage a generated instance profile. IAM [doesn't support private endpoints](https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html) to enable accessing the service without going to the public internet. - -{{% /alert %}} - ## spec.tags Karpenter adds tags to all resources it creates, including EC2 Instances, EBS volumes, and Launch Templates. The default set of tags are listed below. @@ -550,6 +819,7 @@ karpenter.sh/nodeclaim: karpenter.sh/nodepool: karpenter.k8s.aws/ec2nodeclass: kubernetes.io/cluster/: owned +eks:eks-cluster-name: ``` Additional tags can be added in the tags section, which will be merged with the default tags specified above. @@ -578,7 +848,7 @@ spec: metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled - httpPutResponseHopLimit: 2 + httpPutResponseHopLimit: 1 httpTokens: required ``` @@ -643,17 +913,6 @@ spec: encrypted: true ``` -### Ubuntu -```yaml -spec: - blockDeviceMappings: - - deviceName: /dev/sda1 - ebs: - volumeSize: 20Gi - volumeType: gp3 - encrypted: true -``` - ### Windows2019/Windows2022 ```yaml spec: @@ -707,7 +966,7 @@ Since the Kubelet & Containerd will be using the instance-store filesystem, you You can control the UserData that is applied to your worker nodes via this field. This allows you to run custom scripts or pass-through custom configuration to Karpenter instances on start-up. ```yaml -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: bottlerocket-example @@ -731,7 +990,7 @@ See [Node NotReady]({{< ref "../troubleshooting/#node-notready" >}}) troubleshoo {{% /alert %}} ```yaml -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: al2-example @@ -749,11 +1008,11 @@ spec: chown -R ec2-user ~ec2-user/.ssh ``` -For more examples on configuring fields for different AMI families, see the [examples here](https://github.com/aws/karpenter/blob/v0.37.0/examples/v1beta1/). +For more examples on configuring fields for different AMI families, see the [examples here](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1). Karpenter will merge the userData you specify with the default userData for that AMIFamily. 
See the [AMIFamily]({{< ref "#specamifamily" >}}) section for more details on these defaults. View the sections below to understand the different merge strategies for each AMIFamily. -### AL2/Ubuntu +### AL2 * Your UserData can be in the [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) format. * Karpenter will transform your custom user-data as a MIME part, if necessary, and then merge a final MIME part to the end of your UserData parts which will bootstrap the worker node. Karpenter will have full control over all the parameters being passed to the bootstrap script. @@ -829,16 +1088,15 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 --//-- ``` -{{% alert title="Note" color="primary" %}} -You can also set kubelet-config properties by modifying the kubelet-config.json file before the EKS bootstrap script starts the kubelet: +{{% alert title="Tip" color="secondary" %}} +You can set additional kubelet configuration properties, unavailable through `spec.kubelet`, by updating the `kubelet-config.json` file: ```yaml -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: kubelet-config-example spec: - ... amiFamily: AL2 userData: | #!/bin/bash @@ -850,7 +1108,12 @@ spec: * Your UserData may be in one of three formats: a [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive), a NodeConfig YAML / JSON string, or a shell script. * Karpenter will transform your custom UserData into a MIME part, if necessary, and then create a MIME multi-part archive. This archive will consist of a generated NodeConfig, containing Karpenter's default values, followed by the transformed custom UserData. For more information on the NodeConfig spec, refer to the [AL2023 EKS Optimized AMI docs](https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/examples/). -* If a value is specified both in the Karpenter generated NodeConfig and the same value is specified in the custom user data, the value in the custom user data will take precedence. + +{{% alert title="Warning" color="warning" %}} +Any values configured by the Karpenter generated NodeConfig object will take precedent over values specifed in `spec.userData`. +This includes cluster name, cluster CIDR, cluster endpoint, certificate authority, taints, labels, and any value in [spec.kubelet]({{< ref "#speckubelet" >}}). +These fields must be configured natively through Karpenter rather than through UserData. +{{% /alert %}} #### Passed-in UserData (NodeConfig) @@ -870,7 +1133,16 @@ MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="//" --// -# Karpenter Generated NodeConfig +Content-Type: application/node.eks.aws + +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 + +--// Content-Type: application/node.eks.aws # Karpenter Generated NodeConfig @@ -890,15 +1162,6 @@ spec: flags: - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" ---// -Content-Type: application/node.eks.aws - -apiVersion: node.eks.aws/v1alpha1 -kind: NodeConfig -spec: - kubelet: - config: - maxPods: 42 --//-- ``` @@ -915,6 +1178,12 @@ echo "Hello, AL2023!" MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="//" +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" 
+ --// Content-Type: application/node.eks.aws @@ -935,11 +1204,6 @@ spec: flags: - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" ---// -Content-Type: text/x-shellscript; charset="us-ascii" - -#!/bin/bash -echo "Hello, AL2023!" --//-- ``` @@ -949,6 +1213,12 @@ echo "Hello, AL2023!" MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="//" +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" + --// Content-Type: application/node.eks.aws @@ -959,11 +1229,6 @@ spec: config: maxPods: 42 --// -Content-Type: text/x-shellscript; charset="us-ascii" - -#!/bin/bash -echo "Hello, AL2023!" ---// ``` #### Merged UserData (MIME) @@ -975,6 +1240,21 @@ Content-Type: multipart/mixed; boundary="//" --// Content-Type: application/node.eks.aws +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" + +--// +Content-Type: application/node.eks.aws + # Karpenter Generated NodeConfig apiVersion: node.eks.aws/v1alpha1 kind: NodeConfig @@ -992,32 +1272,20 @@ spec: flags: - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" ---// -Content-Type: application/node.eks.aws - -apiVersion: node.eks.aws/v1alpha1 -kind: NodeConfig -spec: - kubelet: - config: - maxPods: 42 ---// -Content-Type: text/x-shellscript; charset="us-ascii" - -#!/bin/bash -echo "Hello, AL2023!" --//-- ``` ### Bottlerocket * Your UserData must be valid TOML. -* Karpenter will automatically merge settings to ensure successful bootstrap including `cluster-name`, `api-server` and `cluster-certificate`. Any labels and taints that need to be set based on pod requirements will also be specified in the final merged UserData. - * All Kubelet settings that Karpenter applies will override the corresponding settings in the provided UserData. For example, if you've specified `settings.kubernetes.cluster-name`, it will be overridden. - * If MaxPods is specified via the binary arg to Karpenter, the value will override anything specified in the UserData. - * If ClusterDNS is specified via `spec.kubeletConfiguration`, then that value will override anything specified in the UserData. * Unknown TOML fields will be ignored when the final merged UserData is generated by Karpenter. +{{% alert title="Warning" color="warning" %}} +Any values configured by Karpenter will take precedent over values specifed in `spec.userData`. +This includes cluster name, cluster endpoint, cluster certificate, taints, labels, and any value in [spec.kubelet]({{< ref "#speckubelet" >}}). +These fields must be configured natively through Karpenter rather than through UserData. +{{% /alert %}} + Consider the following example to understand how your custom UserData settings will be merged in. #### Passed-in UserData @@ -1091,6 +1359,9 @@ spec: ### Custom * No merging is performed, your UserData must perform all setup required of the node to allow it to join the cluster. 
+* Custom UserData must meet the following requirements to work correctly with Karpenter: + * It must ensure the node is registered with the `karpenter.sh/unregistered:NoExecute` taint (via kubelet configuration field `registerWithTaints`) + * It must set kubelet config options to match those configured in `spec.kubelet` ## spec.detailedMonitoring @@ -1103,7 +1374,10 @@ spec: ## spec.associatePublicIPAddress -A boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected. +You can explicitly set `AssociatePublicIPAddress: false` when you are only launching into private subnets. +Previously, Karpenter auto-set `associatePublicIPAddress` on the primary ENI to false if a user’s subnet options were all private subnets. +This value is a boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected. + {{% alert title="Note" color="warning" %}} If a `NodeClaim` requests `vpc.amazonaws.com/efa` resources, `spec.associatePublicIPAddress` is respected. However, if this `NodeClaim` requests **multiple** EFA resources and the value for `spec.associatePublicIPAddress` is true, the instance will fail to launch. This is due to an EC2 restriction which @@ -1161,11 +1435,12 @@ status: #### Examples -Default AMIs resolved from the AL2 AMIFamily: +AMIs resolved with an AL2 alias: ```yaml spec: - amiFamily: AL2 + amiSelectorTerms: + - alias: al2@v20240807 status: amis: - id: ami-03c3a3dcda64f5b75 @@ -1210,11 +1485,10 @@ status: operator: DoesNotExist ``` -AMIs resolved from [`spec.amiSelectorTerms`]({{< ref "#specamiselectorterms" >}}): +AMIs resolved from tags: ```yaml spec: - amiFamily: AL2 amiSelectorTerms: - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -1249,7 +1523,7 @@ status: ## status.conditions -[`status.conditions`]({{< ref "#statusconditions" >}}) indicates EC2NodeClass readiness. This will be `Ready` when Karpenter successfully discovers AMIs, Instance Profile, Subnets, Cluster CIDR and SecurityGroups for the EC2NodeClass. +[`status.conditions`]({{< ref "#statusconditions" >}}) indicates EC2NodeClass readiness. This will be `Ready` when Karpenter successfully discovers AMIs, Instance Profile, Subnets, Cluster CIDR (AL2023 only) and SecurityGroups for the EC2NodeClass. ```yaml spec: @@ -1276,6 +1550,3 @@ status: Status: False Type: Ready ``` -{{% alert title="Note" color="primary" %}} -An EC2NodeClass that uses AL2023 requires the cluster CIDR for launching nodes. Cluster CIDR will not be resolved for EC2NodeClass that doesn't use AL2023. 
-{{% /alert %}} diff --git a/website/content/en/docs/concepts/nodepools.md b/website/content/en/docs/concepts/nodepools.md index 9d72c848be20..69198bb0a6b7 100644 --- a/website/content/en/docs/concepts/nodepools.md +++ b/website/content/en/docs/concepts/nodepools.md @@ -1,7 +1,7 @@ --- title: "NodePools" linkTitle: "NodePools" -weight: 1 +weight: 10 description: > Configure Karpenter with NodePools --- @@ -22,10 +22,15 @@ Here are things you should know about NodePools: * If Karpenter encounters a startup taint in the NodePool it will be applied to nodes that are provisioned, but pods do not need to tolerate the taint. Karpenter assumes that the taint is temporary and some other system will remove the taint. * It is recommended to create NodePools that are mutually exclusive. So no Pod should match multiple NodePools. If multiple NodePools are matched, Karpenter will use the NodePool with the highest [weight](#specweight). -For some example `NodePool` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/v0.37.0/examples/v1beta1/). + +{{% alert title="Note" color="primary" %}} +Objects for setting Kubelet features have been moved from the NodePool spec to the EC2NodeClasses spec, to not require other Karpenter providers to support those features. +{{% /alert %}} + +For some example `NodePool` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1/). ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -46,7 +51,7 @@ spec: spec: # References the Cloud Provider's NodeClass resource, see your cloud provider specific documentation nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + group: karpenter.k8s.aws # Updated since only a single version will be served kind: EC2NodeClass name: default @@ -63,6 +68,21 @@ spec: - key: example.com/another-taint effect: NoSchedule + # The amount of time a Node can live on the cluster before being removed + # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes + # You can choose to disable expiration entirely by setting the string value 'Never' here + + # Note: changing this value in the nodepool will drift the nodeclaims. + expireAfter: 720h | Never + + # The amount of time that a node can be draining before it's forcibly deleted. A node begins draining when a delete call is made against it, starting + # its finalization flow. Pods with TerminationGracePeriodSeconds will be deleted preemptively before this terminationGracePeriod ends to give as much time to cleanup as possible. + # If your pod's terminationGracePeriodSeconds is larger than this terminationGracePeriod, Karpenter may forcibly delete the pod + # before it has its full terminationGracePeriod to cleanup. + + # Note: changing this value in the nodepool will drift the nodeclaims. + terminationGracePeriod: 48h + # Requirements that constrain the parameters of provisioned nodes. # These requirements are combined with pod.spec.topologySpreadConstraints, pod.spec.affinity.nodeAffinity, pod.spec.affinity.podAffinity, and pod.spec.nodeSelector rules. # Operators { In, NotIn, Exists, DoesNotExist, Gt, and Lt } are supported. 
@@ -72,7 +92,7 @@ spec: operator: In values: ["c", "m", "r"] # minValues here enforces the scheduler to consider at least that number of unique instance-category to schedule the pods. - # This field is ALPHA and can be dropped or replaced at any time + # This field is ALPHA and can be dropped or replaced at any time minValues: 2 - key: "karpenter.k8s.aws/instance-family" operator: In @@ -97,55 +117,18 @@ spec: operator: In values: ["spot", "on-demand"] - # Karpenter provides the ability to specify a few additional Kubelet args. - # These are all optional and provide support for additional customization and use cases. - kubelet: - clusterDNS: ["10.0.1.100"] - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 - # Disruption section which describes the ways in which Karpenter can disrupt and replace Nodes # Configuration in this section constrains how aggressive Karpenter can be with performing operations # like rolling Nodes due to them hitting their maximum lifetime (expiry) or scaling down nodes to reduce cluster cost disruption: # Describes which types of Nodes Karpenter should consider for consolidation - # If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost + # If using 'WhenEmptyOrUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is empty or underutilized and could be changed to reduce cost # If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods - consolidationPolicy: WhenUnderutilized | WhenEmpty + consolidationPolicy: WhenEmptyOrUnderutilized | WhenEmpty - # The amount of time Karpenter should wait after discovering a consolidation decision - # This value can currently only be set when the consolidationPolicy is 'WhenEmpty' + # The amount of time Karpenter should wait to consolidate a node after a pod has been added or removed from the node. # You can choose to disable consolidation entirely by setting the string value 'Never' here - consolidateAfter: 30s - - # The amount of time a Node can live on the cluster before being removed - # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes - # You can choose to disable expiration entirely by setting the string value 'Never' here - expireAfter: 720h + consolidateAfter: 1m | Never # Added to allow additional control over consolidation aggressiveness # Budgets control the speed Karpenter can scale down nodes. # Karpenter will respect the minimum of the currently active budgets, and will round up @@ -167,7 +150,49 @@ spec: # to select. Higher weights indicate higher priority when comparing NodePools. # Specifying no weight is equivalent to specifying a weight of 0. 
weight: 10
+status:
+  conditions:
+    - type: Initialized
+      status: "False"
+      observedGeneration: 1
+      lastTransitionTime: "2024-02-02T19:54:34Z"
+      reason: NodeClaimNotLaunched
+      message: "NodeClaim hasn't succeeded launch"
+  resources:
+    cpu: "20"
+    memory: "8192Mi"
+    ephemeral-storage: "100Gi"
```
+## metadata.name
+The name of the NodePool.
+
+## spec.template.metadata.labels
+Arbitrary key/value pairs to apply to all nodes.
+
+## spec.template.metadata.annotations
+Arbitrary key/value pairs to apply to all nodes.
+
+## spec.template.spec.nodeClassRef
+
+This field points to the Cloud Provider NodeClass resource. See [EC2NodeClasses]({{}}) for details.
+
+## spec.template.spec.taints
+
+Taints to add to provisioned nodes. Pods that don't tolerate those taints could be prevented from scheduling.
+See [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for details.
+
+## spec.template.spec.startupTaints
+
+Taints that are added to nodes to indicate that a certain condition must be met, such as starting an agent or setting up networking, before the node can be initialized.
+These taints must be cleared before pods can be deployed to a node.
+
+## spec.template.spec.expireAfter
+
+The amount of time a Node can live on the cluster before being deleted by Karpenter. Nodes will begin draining once its expiration has been reached.
+
+## spec.template.spec.terminationGracePeriod
+
+The amount of time a Node can be draining before Karpenter forcibly cleans up the node. Pods blocking eviction like PDBs and do-not-disrupt will be respected during draining until the `terminationGracePeriod` is reached, at which point those pods will be forcibly deleted.

## spec.template.spec.requirements

@@ -234,7 +259,7 @@ Karpenter prioritizes Spot offerings if the NodePool allows Spot and on-demand i
Karpenter also allows `karpenter.sh/capacity-type` to be used as a topology key for enforcing topology-spread.

{{% alert title="Note" color="primary" %}}
-There is currently a limit of 30 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 30 requirements and labels combined set on your NodePool.
+There is currently a limit of 100 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 100 requirements and labels combined set on your NodePool.
{{% /alert %}}

### Min Values

@@ -331,157 +356,12 @@ spec:

{{% /alert %}}

-## spec.template.spec.nodeClassRef
-
-This field points to the Cloud Provider NodeClass resource. Learn more about [EC2NodeClasses]({{}}).
-
-## spec.template.spec.kubelet
-
-Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for
-additional customization and use cases. Adjust these only if you know you need to do so. For more details on kubelet configuration arguments, [see the KubeletConfiguration API specification docs](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). The implemented fields are a subset of the full list of upstream kubelet configuration arguments. Please cut an issue if you'd like to see another field implemented.
- -```yaml -kubelet: - clusterDNS: ["10.0.1.100"] - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 -``` - -### Reserved Resources - -Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf. These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.template.spec.kubelet.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.template.spec.kubelet.kubeReserved` values. - -{{% alert title="Note" color="primary" %}} -Karpenter considers these reserved resources when computing the allocatable ephemeral storage on a given instance type. -If `kubeReserved` is not specified, Karpenter will compute the default reserved [CPU](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L251) and [memory](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L235) resources for the purpose of ephemeral storage computation. -These defaults are based on the defaults on Karpenter's supported AMI families, which are not the same as the kubelet defaults. -You should be aware of the CPU and memory default calculation when using Custom AMI Families. If they don't align, there may be a difference in Karpenter's computed allocatable ephemeral storage and the actually ephemeral storage available on the node. -{{% /alert %}} - -### Eviction Thresholds - -The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner. - -Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully. They do so by sending a termination signal to pods that are planning to be evicted and allowing those pods to terminate up to their grace period. - -Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionSoft` field. 
`evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values. - -```yaml -kubelet: - evictionHard: - memory.available: 500Mi - nodefs.available: 10% - nodefs.inodesFree: 10% - imagefs.available: 5% - imagefs.inodesFree: 5% - pid.available: 7% - evictionSoft: - memory.available: 1Gi - nodefs.available: 15% - nodefs.inodesFree: 15% - imagefs.available: 10% - imagefs.inodesFree: 10% - pid.available: 10% -``` - -#### Supported Eviction Signals - -| Eviction Signal | Description | -|--------------------|---------------------------------------------------------------------------------| -| memory.available | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet | -| nodefs.available | nodefs.available := node.stats.fs.available | -| nodefs.inodesFree | nodefs.inodesFree := node.stats.fs.inodesFree | -| imagefs.available | imagefs.available := node.stats.runtime.imagefs.available | -| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree | -| pid.available | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc | - -For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs. - -#### Soft Eviction Grace Periods - -Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and a `evictionSoftGracePeriod[memory.available]` of `1m30`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods. - -Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace-owner had specified a pod `terminationGracePeriodInSeconds` on pods in their namespace, the minimum of `evictionPodGracePeriod` and `terminationGracePeriodInSeconds` would be used. - -```yaml -kubelet: - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - imagefs.available: 1m30s - imagefs.inodesFree: 2m - pid.available: 2m - evictionMaxPodGracePeriod: 60 -``` - -### Pod Density - -By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for a more detailed information on these instance types' limits. - -{{% alert title="Note" color="primary" %}} -By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. 
VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments will be used for the node and cannot share the allocated IPs on the interface to pods. Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pods interfaces through a global [setting](./settings.md#configmap) within the karpenter-global-settings configmap: `aws.reservedENIs`. In the common case, `aws.reservedENIs` should be set to `"1"` if using Custom Networking. -{{% /alert %}} - -{{% alert title="Windows Support Notice" color="warning" %}} -It's currently not possible to specify custom networking with Windows nodes. -{{% /alert %}} - -#### Max Pods - -For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.template.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup. - -{{% alert title="Note" color="primary" %}} -When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result. -{{% /alert %}} - -{{% alert title="Windows Support Notice" color="warning" %}} -Presently, Windows worker nodes do not support using more than one ENI. -As a consequence, the number of IP addresses, and subsequently, the number of pods that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI. -Currently, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit. -{{% /alert %}} - -#### Pods Per Core - -An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.template.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance. - -The value generated from `podsPerCore` cannot exceed `maxPods`, meaning, if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling. - -{{% alert title="Note" color="primary" %}} -`maxPods` may not be set in the `kubelet` of a NodePool, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the NodePool will not cause unexpected behavior by exceeding the `maxPods` value. -{{% /alert %}} - -{{% alert title="Pods Per Core on Bottlerocket" color="warning" %}} -Bottlerocket AMIFamily currently does not support `podsPerCore` configuration. If a NodePool contains a `provider` or `providerRef` to a node template that will launch a Bottlerocket instance, the `podsPerCore` value will be ignored for scheduling and for configuring the kubelet. -{{% /alert %}} ## spec.disruption -You can configure Karpenter to disrupt Nodes through your NodePool in multiple ways. 
You can use `spec.disruption.consolidationPolicy`, `spec.disruption.consolidateAfter` or `spec.disruption.expireAfter`. Read [Disruption]({{}}) for more.
+You can configure Karpenter to disrupt Nodes through your NodePool in multiple ways. You can use `spec.disruption.consolidationPolicy`, `spec.disruption.consolidateAfter`, or `spec.template.spec.expireAfter`.
+You can also rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`.
+Read [Disruption]({{}}) for more.

## spec.limits

@@ -490,7 +370,7 @@ The NodePool spec includes a limits section (`spec.limits`), which constrains th

Karpenter supports limits of any resource type reported by your cloudprovider. It limits instance types when scheduling to those that will not exceed the specified limits. If a limit has been exceeded, nodes provisioning is prevented until some nodes have been terminated.

```yaml
-apiVersion: karpenter.sh/v1beta1
+apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: default
@@ -528,6 +408,18 @@ Karpenter allows you to describe NodePool preferences through a `weight` mechani

For more information on weighting NodePools, see the [Weighted NodePools section]({{}}) in the scheduling docs.

+## status.conditions
+[Conditions](https://github.com/kubernetes/apimachinery/blob/f14778da5523847e4c07346e3161a4b4f6c9186e/pkg/apis/meta/v1/types.go#L1523) objects add observability features to Karpenter.
+* The `status.conditions.type` object reflects node status, such as `Initialized` or `Available`.
+* The status of the condition, `status.conditions.status`, indicates if the condition is `True` or `False`.
+* The `status.conditions.observedGeneration` indicates if the instance is out of date with the current state of `.metadata.generation`.
+* The `status.conditions.lastTransitionTime` object contains a programmatic identifier that indicates the time of the condition's previous transition.
+* The `status.conditions.reason` object indicates the reason for the condition's previous transition.
+* The `status.conditions.message` object provides human-readable details about the condition's previous transition.
+
+## status.resources
+Objects under `status.resources` provide information about the status of resources such as `cpu`, `memory`, and `ephemeral-storage`.
+
## Examples

### Isolating Expensive Hardware

@@ -536,13 +428,13 @@ A NodePool can be set up to only provision nodes on particular processor types.
The following example sets a taint that only allows pods with tolerations for Nvidia GPUs to be scheduled:

```yaml
-apiVersion: karpenter.sh/v1beta1
+apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: gpu
spec:
  disruption:
-    consolidationPolicy: WhenUnderutilized
+    consolidationPolicy: WhenEmptyOrUnderutilized
  template:
    spec:
      requirements:
@@ -560,14 +452,16 @@ In order for a pod to run on a node defined in this NodePool, it must tolerate `

Per the Cilium [docs](https://docs.cilium.io/en/stable/installation/taints/#taint-effects), it's recommended to place a taint of `node.cilium.io/agent-not-ready=true:NoExecute` on nodes to allow Cilium to configure networking prior to other pods starting.
This can be accomplished via the use of Karpenter `startupTaints`. These taints are placed on the node, but pods aren't required to tolerate these taints to be considered for provisioning.
+Failure to provide accurate `startupTaints` can result in Karpenter continually provisioning new nodes.
When the new node joins and the startup taint that Karpenter is unaware of is added, Karpenter now considers the pending pod to be unschedulable to this node. Karpenter will attempt to provision yet another new node to schedule the pending pod. + ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: cilium-startup spec: disruption: - consolidationPolicy: WhenUnderutilized + consolidationPolicy: WhenEmptyOrUnderutilized template: spec: startupTaints: diff --git a/website/content/en/docs/concepts/scheduling.md b/website/content/en/docs/concepts/scheduling.md index 437a6f1cda5f..2ef5b2c62897 100755 --- a/website/content/en/docs/concepts/scheduling.md +++ b/website/content/en/docs/concepts/scheduling.md @@ -1,7 +1,7 @@ --- title: "Scheduling" linkTitle: "Scheduling" -weight: 3 +weight: 40 description: > Learn about scheduling workloads with Karpenter --- @@ -174,8 +174,9 @@ requirements: - key: user.defined.label/type operator: Exists ``` + {{% alert title="Note" color="primary" %}} -There is currently a limit of 30 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 30 requirements and labels combined set on your NodePool. +There is currently a limit of 100 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 100 requirements and labels combined set on your NodePool. {{% /alert %}} #### Node selectors @@ -195,6 +196,16 @@ Then the pod can declare that custom label. See [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) in the Kubernetes documentation for details. +## Preferences + +Karpenter is aware of preferences (node affinity, pod affinity, pod anti-affinity, and pod topology) and treats them as requirements in most circumstances. Karpenter uses these preferences when determining if a pod can schedule on a node (absent topology requirements), or when determining if a pod can be shifted to a new node. + +Karpenter starts by treating preferred affinities as required affinities when constructing requirements for a pod. When these requirements cannot be met, the pod's preferences are relaxed one-at-a-time by ascending weight (lowest weight is relaxed first), and the remaining requirements are tried again. + +{{% alert title="Warning" color="warning" %}} +Karpenter does not interpret preferred affinities as required when constructing topology requirements for scheduling to a node. If these preferences are necessary, required affinities should be used [as documented in Node Affinity](#node-affinity). +{{% /alert %}} + ### Node affinity Examples below illustrate how to use Node affinity to include (`In`) and exclude (`NotIn`) objects. @@ -204,6 +215,10 @@ When setting rules, the following Node affinity types define how hard or soft ea * **requiredDuringSchedulingIgnoredDuringExecution**: This is a hard rule that must be met. * **preferredDuringSchedulingIgnoredDuringExecution**: This is a preference, but the pod can run on a node where it is not guaranteed. 
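As a rough sketch (not taken from the upstream examples), the two rule types can be combined on a single pod. Per the preference handling described above, Karpenter initially treats the preferred term as required and relaxes it, lowest weight first, only if no instance type can satisfy everything:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: affinity-example   # hypothetical pod name, for illustration only
spec:
  affinity:
    nodeAffinity:
      # Hard rule: the pod may only run in these zones
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values: ["us-west-2a", "us-west-2b"]
      # Soft rule: prefer Spot capacity; relaxed if it cannot be satisfied
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 1
          preference:
            matchExpressions:
              - key: karpenter.sh/capacity-type
                operator: In
                values: ["spot"]
  containers:
    - name: app
      image: nginx   # placeholder image
```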
+{{% alert title="Note" color="primary" %}} +Preferred affinities on pods can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy preferences, [see the preferences documentation](#preferences) for details. +{{% /alert %}} + The `IgnoredDuringExecution` part of each tells the pod to keep running, even if conditions change on the node so the rules no longer matched. You can think of these concepts as `required` and `preferred`, since Kubernetes never implemented other variants of these rules. @@ -264,13 +279,13 @@ If they all fail, Karpenter will fail to provision the pod. Karpenter will backoff and retry over time. So if capacity becomes available, it will schedule the pod without user intervention. -## Taints and tolerations +### Taints and tolerations Taints are the opposite of affinity. Setting a taint on a node tells the scheduler to not run a pod on it unless the pod has explicitly said it can tolerate that taint. This example shows a NodePool that was set up with a taint for only running pods that require a GPU, such as the following: ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: gpu @@ -311,9 +326,14 @@ spec: ``` See [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) in the Kubernetes documentation for details. -## Topology Spread +### Topology Spread By using the Kubernetes `topologySpreadConstraints` you can ask the NodePool to have pods push away from each other to limit the blast radius of an outage. Think of it as the Kubernetes evolution for pod affinity: it lets you relate pods with respect to nodes while still allowing spread. + +{{% alert title="Note" color="primary" %}} +Preferred topology spread (`ScheduleAnyway`) can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy spread constraints, [see the preferences documentation](#preferences) for details. +{{% /alert %}} + For example: ```yaml @@ -358,9 +378,15 @@ See [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/worklo NodePools do not attempt to balance or rebalance the availability zones for their nodes. Availability zone balancing may be achieved by defining zonal Topology Spread Constraints for Pods that require multi-zone durability, and NodePools will respect these constraints while optimizing for compute costs. {{% /alert %}} -## Pod affinity/anti-affinity +### Pod affinity/anti-affinity + +By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the Karpenter scheduler of your desire for pods to schedule together or apart with respect to different topology domains. -By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the Karpenter scheduler of your desire for pods to schedule together or apart with respect to different topology domains. For example: +{{% alert title="Note" color="primary" %}} +Preferred affinities on pods can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy preferences, [see the preferences documentation](#preferences) for details. 
+{{% /alert %}} + +For example: ```yaml spec: @@ -388,7 +414,7 @@ The anti-affinity rule would cause it to avoid running on any node with a pod la See [Inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) in the Kubernetes documentation for details. -## Persistent Volume Topology +### Persistent Volume Topology Karpenter automatically detects storage scheduling requirements and includes them in node launch decisions. @@ -456,7 +482,7 @@ If you have purchased a [Savings Plan](https://aws.amazon.com/savingsplans/) or To enable this, you will need to tell the Karpenter controllers which instance types to prioritize and what is the maximum amount of capacity that should be provisioned using those instance types. We can set the `.spec.limits` field on the NodePool to limit the capacity that can be launched by this NodePool. Combined with the `.spec.weight` value, we can tell Karpenter to pull from instance types in the reserved NodePool before defaulting to generic instance types. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: reserved-instance @@ -471,7 +497,7 @@ spec: operator: In values: ["c4.large"] --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -494,7 +520,7 @@ Pods that do not specify node selectors or affinities can potentially be assigne By assigning a higher `.spec.weight` value and restricting a NodePool to a specific capacity type or architecture, we can set default configuration for the nodes launched by pods that don't have node configuration restrictions. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -510,7 +536,7 @@ spec: operator: In values: ["amd64"] --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: arm64-specific @@ -628,7 +654,7 @@ If using Gt/Lt operators, make sure to use values under the actual label values The `Exists` operator can be used on a NodePool to provide workload segregation across nodes. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool spec: template: @@ -712,7 +738,7 @@ This is not identical to a topology spread with a specified ratio. We are const #### NodePools ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: spot @@ -731,7 +757,7 @@ spec: - "4" - "5" --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: on-demand diff --git a/website/content/en/docs/contributing/documentation-updates.md b/website/content/en/docs/contributing/documentation-updates.md new file mode 100644 index 000000000000..0eb9db1e10b3 --- /dev/null +++ b/website/content/en/docs/contributing/documentation-updates.md @@ -0,0 +1,11 @@ +--- +title: "Documentation Updates" +linkTitle: "Documentation Updates" +weight: 50 +description: > + Information helpful for contributing simple documentation updates. +--- + +- Documentation for https://karpenter.sh/docs/ is built under website/content/en/preview/. +- Documentation updates should be made to the "preview" directory. Your changes will be promoted to website/content/en/docs/ by an automated process after the change has been merged. +- Previews for your changes are built and available a few minutes after you push. Look for the "netlify Deploy Preview" link in a comment in your PR. 
diff --git a/website/content/en/docs/faq.md b/website/content/en/docs/faq.md index 80a49837e625..2318827a4dfe 100644 --- a/website/content/en/docs/faq.md +++ b/website/content/en/docs/faq.md @@ -7,6 +7,9 @@ description: > --- ## General +### Is Karpenter safe for production use? +Karpenter v1 is the first stable Karpenter API. Any future incompatible API changes will require a v2 version. + ### How does a NodePool decide to manage a particular node? See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for information on how Karpenter configures and manages nodes. @@ -14,7 +17,7 @@ See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for AWS is the first cloud provider supported by Karpenter, although it is designed to be used with other cloud providers as well. ### Can I write my own cloud provider for Karpenter? -Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.37.0/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. +Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v1.0.0/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. ### What operating system nodes does Karpenter deploy? Karpenter uses the OS defined by the [AMI Family in your EC2NodeClass]({{< ref "./concepts/nodeclasses#specamifamily" >}}). @@ -26,7 +29,7 @@ Karpenter has multiple mechanisms for configuring the [operating system]({{< ref Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). ### What RBAC access is required? -All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/role.yaml) files for details. +All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/role.yaml) files for details. ### Can I run Karpenter outside of a Kubernetes cluster? Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API. @@ -119,7 +122,7 @@ Karpenter has a concept of an “offering” for each instance type, which is a Yes! Karpenter dynamically discovers if you are running in an IPv6 cluster by checking the kube-dns service's cluster-ip. When using an AMI Family such as `AL2`, Karpenter will automatically configure the EKS Bootstrap script for IPv6. 
Some EC2 instance types do not support IPv6 and the Amazon VPC CNI only supports instance types that run on the Nitro hypervisor. It's best to add a requirement to your NodePool to only allow Nitro instance types: ``` -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool ... spec: @@ -138,6 +141,25 @@ For more documentation on enabling IPv6 with the Amazon VPC CNI, see the [docs]( Windows nodes do not support IPv6. {{% /alert %}} +### Why do I see extra nodes get launched to schedule pending pods that remain empty and are later removed? + +You might have a daemonset, userData configuration, or some other workload that applies a taint after a node is provisioned. After the taint is applied, Karpenter will detect that the pod cannot be scheduled to this new node due to the added taint. As a result, Karpenter will provision yet another node. Typically, the original node has the taint removed and the pod schedules to it, leaving the extra new node unused and reaped by emptiness/consolidation. If the taint is not removed quickly enough, Karpenter may remove the original node before the pod can be scheduled via emptiness consolidation. This could result in an infinite loop of nodes being provisioned and consolidated without the pending pod ever scheduling. + +The solution is to configure [startupTaints]({{}}) to make Karpenter aware of any temporary taints that are needed to ensure that pods do not schedule on nodes that are not yet ready to receive them. + +Here's an example for Cilium's startup taint. +``` +apiVersion: karpenter.sh/v1 +kind: NodePool +... +spec: + template: + spec: + startupTaints: + - key: node.cilium.io/agent-not-ready + effect: NoSchedule +``` + ## Scheduling ### When using preferred scheduling constraints, Karpenter launches the correct number of nodes at first. Why do they then sometimes get consolidated immediately? @@ -179,10 +201,10 @@ Yes, see the [KubeletConfiguration Section in the NodePool docs]({{ To get started with Karpenter, the [Getting Started with Karpenter]({{< relref "getting-started-with-karpenter" >}}) guide provides an end-to-end procedure for creating a cluster (with `eksctl`) and adding Karpenter. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. + If you prefer, the following instructions use Terraform to create a cluster and add Karpenter: * [Amazon EKS Blueprints for Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints): Follow a basic [Getting Started](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/) guide and also add modules and add-ons. This includes a [Karpenter](https://aws-ia.github.io/terraform-aws-eks-blueprints/patterns/karpenter/) add-on that lets you bypass the instructions in this guide for setting up Karpenter. diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md index 26b6b5de8ad8..14879bbe720e 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md @@ -11,7 +11,10 @@ Karpenter automatically provisions new nodes in response to unschedulable pods. 
This guide shows how to get started with Karpenter by creating a Kubernetes cluster and installing Karpenter. To use Karpenter, you must be running a supported Kubernetes cluster on a supported cloud provider. -Currently, only EKS on AWS is supported. + +The guide below explains how to utilize the [Karpenter provider for AWS](https://github.com/aws/karpenter-provider-aws) with EKS. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. ## Create a cluster and add Karpenter @@ -45,7 +48,7 @@ After setting up the tools, set the Karpenter and Kubernetes version: ```bash export KARPENTER_NAMESPACE="kube-system" -export KARPENTER_VERSION="0.37.0" +export KARPENTER_VERSION="1.0.0" export K8S_VERSION="1.30" ``` @@ -112,17 +115,17 @@ See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/ As the OCI Helm chart is signed by [Cosign](https://github.com/sigstore/cosign) as part of the release process you can verify the chart before installing it by running the following command. ```bash -cosign verify public.ecr.aws/karpenter/karpenter:0.37.0 \ +cosign verify public.ecr.aws/karpenter/karpenter:1.0.0 \ --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ --certificate-identity-regexp='https://github\.com/aws/karpenter-provider-aws/\.github/workflows/release\.yaml@.+' \ --certificate-github-workflow-repository=aws/karpenter-provider-aws \ --certificate-github-workflow-name=Release \ - --certificate-github-workflow-ref=refs/tags/v0.37.0 \ - --annotations version=0.37.0 + --certificate-github-workflow-ref=refs/tags/v1.0.0 \ + --annotations version=1.0.0 ``` {{% alert title="DNS Policy Notice" color="warning" %}} -Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpetner can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default, and it ensures that Karpenter can reach out to internal Kubernetes services during its lifetime. There may be cases where the DNS service that you are using on your cluster is not up and running before Karpenter starts up. The most common case of this is when you want Karpenter to manage the node capacity where your DNS service pods are running. If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts-up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947).
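For example, a minimal sketch of overriding the DNS policy at install or upgrade time (the release name, namespace, and chart reference below follow the conventions used elsewhere in this guide and are assumptions; adjust them to your installation):

```bash
# Hypothetical example: override the pod DNS policy so Karpenter does not depend on in-cluster DNS.
# KARPENTER_NAMESPACE and KARPENTER_VERSION are assumed to be exported as shown earlier in this guide.
# --reuse-values keeps any other values set during the original install.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --namespace "${KARPENTER_NAMESPACE}" \
  --version "${KARPENTER_VERSION}" \
  --set dnsPolicy=Default \
  --reuse-values
```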
{{% /alert %}} @@ -151,7 +154,7 @@ A single Karpenter NodePool is capable of handling many different pod shapes. Ka Create a default NodePool using the command below. This NodePool uses `securityGroupSelectorTerms` and `subnetSelectorTerms` to discover resources used to launch nodes. We applied the tag `karpenter.sh/discovery` in the `eksctl` command above. Depending on how these resources are shared between clusters, you may need to use different tagging schemes. -The `consolidationPolicy` set to `WhenUnderutilized` in the `disruption` block configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster. This behavior can be disabled by setting `consolidateAfter` to `Never`, telling Karpenter that it should never consolidate nodes. Review the [NodePool API docs]({{}}) for more information. +The `consolidationPolicy` set to `WhenEmptyOrUnderutilized` in the `disruption` block configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster. This behavior can be disabled by setting `consolidateAfter` to `Never`, telling Karpenter that it should never consolidate nodes. Review the [NodePool API docs]({{}}) for more information. Note: This NodePool will create capacity as long as the sum of all created capacity is less than the specified limit. diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml b/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml index 1878cd6d352a..567808be5830 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml @@ -83,7 +83,8 @@ Resources: ], "Condition": { "StringEquals": { - "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" }, "StringLike": { "aws:RequestTag/karpenter.sh/nodepool": "*" @@ -105,6 +106,7 @@ Resources: "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "ec2:CreateAction": [ "RunInstances", "CreateFleet", @@ -128,8 +130,12 @@ Resources: "StringLike": { "aws:ResourceTag/karpenter.sh/nodepool": "*" }, + "StringEqualsIfExists": { + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" + }, "ForAllValues:StringEquals": { "aws:TagKeys": [ + "eks:eks-cluster-name", "karpenter.sh/nodeclaim", "Name" ] @@ -220,6 +226,7 @@ Resources: "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -239,6 +246,7 @@ Resources: "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml 
b/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml index 5f239b186f25..0b301d9a20ea 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml @@ -22,6 +22,6 @@ dashboardProviders: dashboards: default: capacity-dashboard: - url: https://karpenter.sh/v0.37/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json + url: https://karpenter.sh/v1.0/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json performance-dashboard: - url: https://karpenter.sh/v0.37/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json + url: https://karpenter.sh/v1.0/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json b/website/content/en/docs/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json index 7f93053b3206..e85e582de299 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json @@ -115,7 +115,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(cluster,nodepool) (karpenter_nodes_created{nodepool=~\"$nodepool\"})", + "expr": "sum by(cluster,nodepool) (karpenter_nodes_created_total{nodepool=~\"$nodepool\"})", "format": "time_series", "legendFormat": "{{cluster}}", "range": true, @@ -215,7 +215,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(cluster,nodepool) (karpenter_nodes_terminated{nodepool=~\"$nodepool\"})", + "expr": "sum by(cluster,nodepool) (karpenter_nodes_terminated_total{nodepool=~\"$nodepool\"})", "format": "time_series", "legendFormat": "{{cluster}}", "range": true, @@ -408,7 +408,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_actions_performed_total)", + "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_decisions_total)", "legendFormat": "{{label_name}}", "range": true, "refId": "A" @@ -417,102 +417,6 @@ "title": "Disruption Actions Performed", "type": "timeseries" }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "See: https://karpenter.sh/v0.35/concepts/disruption/#automated-methods", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 22 - }, - "id": 17, - 
"options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_nodes_disrupted_total{nodepool=~\"$nodepool\"})", - "legendFormat": "{{label_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Voluntary Node Disruptions: nodepool \"$nodepool\"", - "type": "timeseries" - }, { "datasource": { "type": "prometheus", @@ -1609,7 +1513,7 @@ "type": "prometheus", "uid": "prometheus" }, - "definition": "label_values(karpenter_disruption_actions_performed_total,method)", + "definition": "label_values(karpenter_disruption_decisions_total,method)", "hide": 0, "includeAll": true, "multi": true, @@ -1617,7 +1521,7 @@ "options": [], "query": { "qryType": 1, - "query": "label_values(karpenter_disruption_actions_performed_total,method)", + "query": "label_values(karpenter_disruption_decisions_total,method)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh index fa577d724e9f..08d877b6ded0 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh index 0ab6f5f464bd..7c654b3b1140 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh index 54e826db269b..7b456047096b 100755 --- 
a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh @@ -1,6 +1,6 @@ TEMPOUT="$(mktemp)" -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh index 33f1cb553b1b..85213a3457c3 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh @@ -1,5 +1,5 @@ cat < cloudformation.yaml ``` @@ -162,7 +162,7 @@ For `RunInstances` and `CreateFleet` actions, the Karpenter controller can read The AllowScopedEC2InstanceActionsWithTags Sid allows the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html), and [CreateLaunchTemplate](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) -actions requested by the Karpenter controller to create all `fleet`, `instance`, `volume`, `network-interface`, `launch-template` or `spot-instances-request` EC2 resources (for the partition and region), and requires that the `kubernetes.io/cluster/${ClusterName}` tag be set to `owned` and a `karpenter.sh/nodepool` tag be set to any value. This ensures that Karpenter is only allowed to create instances for a single EKS cluster. +actions requested by the Karpenter controller to create all `fleet`, `instance`, `volume`, `network-interface`, `launch-template` or `spot-instances-request` EC2 resources (for the partition and region). It also requires that the `kubernetes.io/cluster/${ClusterName}` tag be set to `owned`, `aws:RequestTag/eks:eks-cluster-name` be set to `${ClusterName}`, and a `karpenter.sh/nodepool` tag be set to any value. This ensures that Karpenter is only allowed to create instances for a single EKS cluster. ```json { @@ -184,6 +184,7 @@ actions requested by the Karpenter controller to create all `fleet`, `instance`, "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" }, "StringLike": { "aws:RequestTag/karpenter.sh/nodepool": "*" @@ -196,6 +197,7 @@ actions requested by the Karpenter controller to create all `fleet`, `instance`, The AllowScopedResourceCreationTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` and `spot-instances-request` resources, While making `RunInstance`, `CreateFleet`, or `CreateLaunchTemplate` calls. Additionally, this ensures that resources can't be tagged arbitrarily by Karpenter after they are created.
+Conditions that must be met include that `aws:RequestTag/kubernetes.io/cluster/${ClusterName}` be set to `owned` and `aws:RequestTag/eks:eks-cluster-name` be set to `${ClusterName}`. ```json { @@ -213,6 +215,7 @@ actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "ec2:CreateAction": [ "RunInstances", "CreateFleet", @@ -229,6 +232,7 @@ actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` #### AllowScopedResourceTagging The AllowScopedResourceTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) actions on all instances created by Karpenter after their creation. It enforces that Karpenter is only able to update the tags on cluster instances it is operating on through the `kubernetes.io/cluster/${ClusterName}`" and `karpenter.sh/nodepool` tags. +Likewise, if the `aws:RequestTag/eks:eks-cluster-name` key is present in the request, it must be set to `${ClusterName}`, and all requested `aws:TagKeys` must be one of `eks:eks-cluster-name`, `karpenter.sh/nodeclaim`, or `Name`. ```json { "Sid": "AllowScopedResourceTagging", @@ -242,8 +246,12 @@ The AllowScopedResourceTagging Sid allows EC2 [CreateTags](https://docs.aws.amaz "StringLike": { "aws:ResourceTag/karpenter.sh/nodepool": "*" }, + "StringEqualsIfExists": { + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" + }, "ForAllValues:StringEquals": { "aws:TagKeys": [ + "eks:eks-cluster-name", "karpenter.sh/nodeclaim", "Name" ] @@ -310,7 +318,7 @@ This allows the Karpenter controller to do any of those read-only actions across #### AllowSSMReadActions -The AllowSSMReadActions Sid allows the Karpenter controller to read SSM parameters (`ssm:GetParameter`) from the current region for SSM parameters generated by ASW services. +The AllowSSMReadActions Sid allows the Karpenter controller to get SSM parameters (`ssm:GetParameter`) from the current region for SSM parameters generated by AWS services. **NOTE**: If potentially sensitive information is stored in SSM parameters, you could consider restricting access to these messages further. ```json { @@ -376,7 +384,7 @@ This gives EC2 permission explicit permission to use the `KarpenterNodeRole-${Cl #### AllowScopedInstanceProfileCreationActions The AllowScopedInstanceProfileCreationActions Sid gives the Karpenter controller permission to create a new instance profile with [`iam:CreateInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateInstanceProfile.html), -provided that the request is made to a cluster with `kubernetes.io/cluster/${ClusterName}` set to owned and is made in the current region. +provided that the request is made with the `RequestTag` attributes `kubernetes.io/cluster/${ClusterName}` set to `owned`, `eks:eks-cluster-name` set to `${ClusterName}`, and `topology.kubernetes.io/region` set to the current region. Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter can generate instance profiles on your behalf based on roles specified in your `EC2NodeClasses` that you use to configure Karpenter. ```json { @@ -390,6 +398,7 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value.
This ensures t "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -401,8 +410,8 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures t #### AllowScopedInstanceProfileTagActions -The AllowScopedInstanceProfileTagActions Sid gives the Karpenter controller permission to tag an instance profile with [`iam:TagInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_TagInstanceProfile.html), based on the values shown below, -Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter is only able to act on instance profiles that it provisions for this cluster. +The AllowScopedInstanceProfileTagActions Sid gives the Karpenter controller permission to tag an instance profile with [`iam:TagInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_TagInstanceProfile.html), provided that the `ResourceTag` attributes `kubernetes.io/cluster/${ClusterName}` and `topology.kubernetes.io/region` are set to `owned` and the current region, respectively, and that the `RequestTag` attributes `kubernetes.io/cluster/${ClusterName}`, `eks:eks-cluster-name`, and `topology.kubernetes.io/region` are set to `owned`, `${ClusterName}`, and the current region, respectively. +Also, `ResourceTag/karpenter.k8s.aws/ec2nodeclass` and `RequestTag/karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter is only able to act on instance profiles that it provisions for this cluster. ```json { @@ -417,6 +426,7 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures t "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -456,9 +466,9 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This permissio } ``` -#### AllowInstanceProfileActions +#### AllowInstanceProfileReadActions -The AllowInstanceProfileActions Sid gives the Karpenter controller permission to perform [`iam:GetInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetInstanceProfile.html) actions to retrieve information about a specified instance profile, including understanding if an instance profile has been provisioned for an `EC2NodeClass` or needs to be re-provisioned. +The AllowInstanceProfileReadActions Sid gives the Karpenter controller permission to perform [`iam:GetInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetInstanceProfile.html) actions to retrieve information about a specified instance profile, including understanding if an instance profile has been provisioned for an `EC2NodeClass` or needs to be re-provisioned.
```json { diff --git a/website/content/en/docs/reference/instance-types.md b/website/content/en/docs/reference/instance-types.md index c01e769d9130..29dd6b22a6e4 100644 --- a/website/content/en/docs/reference/instance-types.md +++ b/website/content/en/docs/reference/instance-types.md @@ -6235,7 +6235,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|91553| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|15200| + |karpenter.k8s.aws/instance-local-nvme|3760| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|40000| |karpenter.k8s.aws/instance-size|12xlarge| @@ -6268,7 +6268,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|22888| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|3800| + |karpenter.k8s.aws/instance-local-nvme|1880| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| |karpenter.k8s.aws/instance-size|16xlarge| @@ -6301,7 +6301,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|91553| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|15200| + |karpenter.k8s.aws/instance-local-nvme|3760| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| |karpenter.k8s.aws/instance-size|24xlarge| @@ -6334,7 +6334,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|183105| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|60800| + |karpenter.k8s.aws/instance-local-nvme|7520| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| |karpenter.k8s.aws/instance-size|48xlarge| @@ -6537,7 +6537,6 @@ below are the resources available with some assumptions and after the instance o |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| |node.kubernetes.io/instance-type|hpc7g.4xlarge| - |topology.k8s.aws/zone-id|6419929671613507071| #### Resources | Resource | Quantity | |--|--| @@ -6564,7 +6563,6 @@ below are the resources available with some assumptions and after the instance o |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| |node.kubernetes.io/instance-type|hpc7g.8xlarge| - |topology.k8s.aws/zone-id|3124717047704565898| #### Resources | Resource | Quantity | |--|--| @@ -6591,7 +6589,6 @@ below are the resources available with some assumptions and after the instance o |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| |node.kubernetes.io/instance-type|hpc7g.16xlarge| - |topology.k8s.aws/zone-id|4594531912622968525| #### Resources | Resource | Quantity | |--|--| @@ -18501,6 +18498,311 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| +## r8g Family +### `r8g.medium` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|8192| + |karpenter.k8s.aws/instance-network-bandwidth|520| + |karpenter.k8s.aws/instance-size|medium| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.medium| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|940m| + |ephemeral-storage|17Gi| + |memory|7075Mi| + |pods|8| +### `r8g.large` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|16384| + |karpenter.k8s.aws/instance-network-bandwidth|937| + |karpenter.k8s.aws/instance-size|large| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.large| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|1930m| + |ephemeral-storage|17Gi| + |memory|14422Mi| + |pods|29| +### `r8g.xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|32768| + |karpenter.k8s.aws/instance-network-bandwidth|1876| + |karpenter.k8s.aws/instance-size|xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|3920m| + |ephemeral-storage|17Gi| + |memory|29258Mi| + |pods|58| +### `r8g.2xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|65536| + |karpenter.k8s.aws/instance-network-bandwidth|3750| + |karpenter.k8s.aws/instance-size|2xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.2xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|7910m| + |ephemeral-storage|17Gi| + |memory|59568Mi| + |pods|58| +### `r8g.4xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|7500| + 
|karpenter.k8s.aws/instance-size|4xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.4xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|15890m| + |ephemeral-storage|17Gi| + |memory|118253Mi| + |pods|234| +### `r8g.8xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|15000| + |karpenter.k8s.aws/instance-size|8xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.8xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|31850m| + |ephemeral-storage|17Gi| + |memory|239495Mi| + |pods|234| +### `r8g.12xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|22500| + |karpenter.k8s.aws/instance-size|12xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.12xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|47810m| + |ephemeral-storage|17Gi| + |memory|360736Mi| + |pods|234| +### `r8g.16xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|30000| + |karpenter.k8s.aws/instance-size|16xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.16xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|476445Mi| + |pods|737| +### `r8g.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|40000| + |karpenter.k8s.aws/instance-size|24xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + 
|memory|718928Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.metal-24xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|40000| + |karpenter.k8s.aws/instance-size|metal-24xl| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.metal-24xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|718928Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.48xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|48xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.48xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446378Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.metal-48xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446378Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| ## t1 Family ### `t1.micro` #### Labels @@ -19508,6 +19810,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|12582912| + |karpenter.k8s.aws/instance-network-bandwidth|100000| |karpenter.k8s.aws/instance-size|224xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -19534,6 +19837,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16777216| + |karpenter.k8s.aws/instance-network-bandwidth|200000| |karpenter.k8s.aws/instance-size|224xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -19560,6 +19864,7 @@ below are the resources available with some assumptions and after the instance 
o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|25165824| + |karpenter.k8s.aws/instance-network-bandwidth|200000| |karpenter.k8s.aws/instance-size|224xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -19586,6 +19891,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|33554432| + |karpenter.k8s.aws/instance-network-bandwidth|200000| |karpenter.k8s.aws/instance-size|224xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| diff --git a/website/content/en/docs/reference/metrics.md b/website/content/en/docs/reference/metrics.md index 30e52c681726..674dc859179c 100644 --- a/website/content/en/docs/reference/metrics.md +++ b/website/content/en/docs/reference/metrics.md @@ -10,193 +10,301 @@ description: > Karpenter makes several metrics available in Prometheus format to allow monitoring cluster provisioning status. These metrics are available by default at `karpenter.karpenter.svc.cluster.local:8080/metrics` configurable via the `METRICS_PORT` environment variable documented [here](../settings) ### `karpenter_build_info` A metric with a constant '1' value labeled by version from which karpenter was built. +- Stability Level: STABLE -## Nodepool Metrics +## Nodeclaims Metrics + +### `karpenter_nodeclaims_termination_duration_seconds` +Duration of NodeClaim termination in seconds. +- Stability Level: BETA -### `karpenter_nodepool_usage` -The nodepool usage is the amount of resources that have been provisioned by a particular nodepool. Labeled by nodepool name and resource type. +### `karpenter_nodeclaims_terminated_total` +Number of nodeclaims terminated in total by Karpenter. Labeled by the owning nodepool. +- Stability Level: STABLE -### `karpenter_nodepool_limit` -The nodepool limits are the limits specified on the nodepool that restrict the quantity of resources provisioned. Labeled by nodepool name and resource type. +### `karpenter_nodeclaims_instance_termination_duration_seconds` +Duration of CloudProvider Instance termination in seconds. +- Stability Level: BETA + +### `karpenter_nodeclaims_disrupted_total` +Number of nodeclaims disrupted in total by Karpenter. Labeled by reason the nodeclaim was disrupted and the owning nodepool. +- Stability Level: ALPHA + +### `karpenter_nodeclaims_created_total` +Number of nodeclaims created in total by Karpenter. Labeled by reason the nodeclaim was created and the owning nodepool. +- Stability Level: STABLE ## Nodes Metrics ### `karpenter_nodes_total_pod_requests` -Node total pod requests are the resources requested by non-DaemonSet pods bound to nodes. +Node total pod requests are the resources requested by pods bound to nodes, including the DaemonSet pods. +- Stability Level: BETA ### `karpenter_nodes_total_pod_limits` -Node total pod limits are the resources specified by non-DaemonSet pod limits. +Node total pod limits are the resources specified by pod limits, including the DaemonSet pods. +- Stability Level: BETA ### `karpenter_nodes_total_daemon_requests` Node total daemon requests are the resource requested by DaemonSet pods bound to nodes. +- Stability Level: BETA ### `karpenter_nodes_total_daemon_limits` Node total daemon limits are the resources specified by DaemonSet pod limits. 
+- Stability Level: BETA -### `karpenter_nodes_termination_time_seconds` +### `karpenter_nodes_termination_duration_seconds` The time taken between a node's deletion request and the removal of its finalizer +- Stability Level: BETA -### `karpenter_nodes_terminated` +### `karpenter_nodes_terminated_total` Number of nodes terminated in total by Karpenter. Labeled by owning nodepool. +- Stability Level: STABLE ### `karpenter_nodes_system_overhead` Node system daemon overhead are the resources reserved for system overhead, the difference between the node's capacity and allocatable values are reported by the status. +- Stability Level: BETA -### `karpenter_nodes_leases_deleted` +### `karpenter_nodes_leases_deleted_total` Number of deleted leaked leases. +- Stability Level: ALPHA -### `karpenter_nodes_eviction_queue_depth` -The number of pods currently waiting for a successful eviction in the eviction queue. - -### `karpenter_nodes_created` +### `karpenter_nodes_created_total` Number of nodes created in total by Karpenter. Labeled by owning nodepool. +- Stability Level: STABLE ### `karpenter_nodes_allocatable` Node allocatable are the resources allocatable by nodes. +- Stability Level: BETA ## Pods Metrics ### `karpenter_pods_state` Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, nodepool name, zone, architecture, capacity type, instance type and pod phase. +- Stability Level: BETA -### `karpenter_pods_startup_time_seconds` +### `karpenter_pods_startup_duration_seconds` The time from pod creation until the pod is running. +- Stability Level: STABLE -## Provisioner Metrics +## Voluntary Disruption Metrics -### `karpenter_provisioner_scheduling_simulation_duration_seconds` -Duration of scheduling simulations used for deprovisioning and provisioning in seconds. +### `karpenter_voluntary_disruption_queue_failures_total` +The number of times that an enqueued disruption decision failed. Labeled by disruption method. +- Stability Level: BETA -### `karpenter_provisioner_scheduling_queue_depth` -The number of pods currently waiting to be scheduled. +### `karpenter_voluntary_disruption_eligible_nodes` +Number of nodes eligible for disruption by Karpenter. Labeled by disruption reason. +- Stability Level: BETA -### `karpenter_provisioner_scheduling_duration_seconds` -Duration of scheduling process in seconds. +### `karpenter_voluntary_disruption_decisions_total` +Number of disruption decisions performed. Labeled by disruption decision, reason, and consolidation type. +- Stability Level: STABLE -## Nodeclaims Metrics +### `karpenter_voluntary_disruption_decision_evaluation_duration_seconds` +Duration of the disruption decision evaluation process in seconds. Labeled by method and consolidation type. +- Stability Level: BETA -### `karpenter_nodeclaims_terminated` -Number of nodeclaims terminated in total by Karpenter. Labeled by reason the nodeclaim was terminated and the owning nodepool. +### `karpenter_voluntary_disruption_consolidation_timeouts_total` +Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. +- Stability Level: BETA -### `karpenter_nodeclaims_registered` -Number of nodeclaims registered in total by Karpenter. Labeled by the owning nodepool. +## Scheduler Metrics -### `karpenter_nodeclaims_launched` -Number of nodeclaims launched in total by Karpenter. Labeled by the owning nodepool. 
+### `karpenter_scheduler_scheduling_duration_seconds` +Duration of scheduling simulations used for deprovisioning and provisioning in seconds. +- Stability Level: STABLE -### `karpenter_nodeclaims_initialized` -Number of nodeclaims initialized in total by Karpenter. Labeled by the owning nodepool. +### `karpenter_scheduler_queue_depth` +The number of pods currently waiting to be scheduled. +- Stability Level: BETA -### `karpenter_nodeclaims_drifted` -Number of nodeclaims drifted reasons in total by Karpenter. Labeled by drift type of the nodeclaim and the owning nodepool. +## Nodepools Metrics -### `karpenter_nodeclaims_disrupted` -Number of nodeclaims disrupted in total by Karpenter. Labeled by disruption type of the nodeclaim and the owning nodepool. +### `karpenter_nodepools_usage` +The amount of resources that have been provisioned for a nodepool. Labeled by nodepool name and resource type. +- Stability Level: ALPHA -### `karpenter_nodeclaims_created` -Number of nodeclaims created in total by Karpenter. Labeled by reason the nodeclaim was created and the owning nodepool. +### `karpenter_nodepools_limit` +Limits specified on the nodepool that restrict the quantity of resources provisioned. Labeled by nodepool name and resource type. +- Stability Level: ALPHA + +### `karpenter_nodepools_allowed_disruptions` +The number of nodes for a given NodePool that can be concurrently disrupting at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. +- Stability Level: ALPHA ## Interruption Metrics -### `karpenter_interruption_received_messages` +### `karpenter_interruption_received_messages_total` Count of messages received from the SQS queue. Broken down by message type and whether the message was actionable. +- Stability Level: STABLE -### `karpenter_interruption_message_latency_time_seconds` -Length of time between message creation in queue and an action taken on the message by the controller. +### `karpenter_interruption_message_queue_duration_seconds` +Amount of time an interruption message is on the queue before it is processed by karpenter. +- Stability Level: STABLE -### `karpenter_interruption_deleted_messages` +### `karpenter_interruption_deleted_messages_total` Count of messages deleted from the SQS queue. - -### `karpenter_interruption_actions_performed` -Number of notification actions performed. Labeled by action - -## Disruption Metrics - -### `karpenter_disruption_replacement_nodeclaim_initialized_seconds` -Amount of time required for a replacement nodeclaim to become initialized. - -### `karpenter_disruption_replacement_nodeclaim_failures_total` -The number of times that Karpenter failed to launch a replacement node for disruption. Labeled by disruption method. - -### `karpenter_disruption_queue_depth` -The number of commands currently being waited on in the disruption orchestration queue. - -### `karpenter_disruption_pods_disrupted_total` -Total number of reschedulable pods disrupted on nodes. Labeled by NodePool, disruption action, method, and consolidation type. - -### `karpenter_disruption_nodes_disrupted_total` -Total number of nodes disrupted. Labeled by NodePool, disruption action, method, and consolidation type. - -### `karpenter_disruption_evaluation_duration_seconds` -Duration of the disruption evaluation process in seconds. Labeled by method and consolidation type. - -### `karpenter_disruption_eligible_nodes` -Number of nodes eligible for disruption by Karpenter. 
Labeled by disruption method and consolidation type. - -### `karpenter_disruption_consolidation_timeouts_total` -Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. - -### `karpenter_disruption_budgets_allowed_disruptions` -The number of nodes for a given NodePool that can be disrupted at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. - -### `karpenter_disruption_actions_performed_total` -Number of disruption actions performed. Labeled by disruption action, method, and consolidation type. - -## Consistency Metrics - -### `karpenter_consistency_errors` -Number of consistency checks that have failed. +- Stability Level: STABLE ## Cluster State Metrics ### `karpenter_cluster_state_synced` Returns 1 if cluster state is synced and 0 otherwise. Synced checks that nodeclaims and nodes that are stored in the APIServer have the same representation as Karpenter's cluster state +- Stability Level: STABLE ### `karpenter_cluster_state_node_count` Current count of nodes in cluster state +- Stability Level: STABLE ## Cloudprovider Metrics ### `karpenter_cloudprovider_instance_type_offering_price_estimate` Instance type offering estimated hourly price used when making informed decisions on node cost calculation, based on instance type, capacity type, and zone. +- Stability Level: BETA ### `karpenter_cloudprovider_instance_type_offering_available` Instance type offering availability, based on instance type, capacity type, and zone +- Stability Level: BETA ### `karpenter_cloudprovider_instance_type_memory_bytes` Memory, in bytes, for a given instance type. +- Stability Level: BETA ### `karpenter_cloudprovider_instance_type_cpu_cores` VCPUs cores for a given instance type. +- Stability Level: BETA ### `karpenter_cloudprovider_errors_total` Total number of errors returned from CloudProvider calls. +- Stability Level: BETA ### `karpenter_cloudprovider_duration_seconds` Duration of cloud provider method calls. Labeled by the controller, method name and provider. +- Stability Level: BETA ## Cloudprovider Batcher Metrics ### `karpenter_cloudprovider_batcher_batch_time_seconds` Duration of the batching window per batcher +- Stability Level: BETA ### `karpenter_cloudprovider_batcher_batch_size` Size of the request batch per batcher +- Stability Level: BETA ## Controller Runtime Metrics +### `controller_runtime_terminal_reconcile_errors_total` +Total number of terminal reconciliation errors per controller +- Stability Level: STABLE + ### `controller_runtime_reconcile_total` Total number of reconciliations per controller +- Stability Level: STABLE ### `controller_runtime_reconcile_time_seconds` Length of time per reconciliation per controller +- Stability Level: STABLE ### `controller_runtime_reconcile_errors_total` Total number of reconciliation errors per controller +- Stability Level: STABLE ### `controller_runtime_max_concurrent_reconciles` Maximum number of concurrent reconciles per controller +- Stability Level: STABLE ### `controller_runtime_active_workers` Number of currently used workers per controller +- Stability Level: STABLE + +## Workqueue Metrics + +### `workqueue_work_duration_seconds` +How long in seconds processing an item from workqueue takes. +- Stability Level: STABLE + +### `workqueue_unfinished_work_seconds` +How many seconds of work has been done that is in progress and hasn't been observed by work_duration. 
Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases. +- Stability Level: STABLE + +### `workqueue_retries_total` +Total number of retries handled by workqueue +- Stability Level: STABLE + +### `workqueue_queue_duration_seconds` +How long in seconds an item stays in workqueue before being requested +- Stability Level: STABLE + +### `workqueue_longest_running_processor_seconds` +How many seconds has the longest running processor for workqueue been running. +- Stability Level: STABLE + +### `workqueue_depth` +Current depth of workqueue +- Stability Level: STABLE + +### `workqueue_adds_total` +Total number of adds handled by workqueue +- Stability Level: STABLE + +## Status Condition Metrics + +### `operator_status_condition_transitions_total` +The count of transitions of a given object, type and status. +- Stability Level: BETA + +### `operator_status_condition_transition_seconds` +The amount of time a condition was in a given state before transitioning. e.g. Alarm := P99(Updated=False) > 5 minutes +- Stability Level: BETA + +### `operator_status_condition_current_status_seconds` +The current amount of time in seconds that a status condition has been in a specific state. Alarm := P99(Updated=Unknown) > 5 minutes +- Stability Level: BETA + +### `operator_status_condition_count` +The number of an condition for a given object, type and status. e.g. Alarm := Available=False > 0 +- Stability Level: BETA + +## Client Go Metrics + +### `client_go_request_total` +Number of HTTP requests, partitioned by status code and method. +- Stability Level: STABLE + +### `client_go_request_duration_seconds` +Request latency in seconds. Broken down by verb, group, version, kind, and subresource. +- Stability Level: STABLE + +## AWS SDK Go Metrics + +### `aws_sdk_go_request_total` +The total number of AWS SDK Go requests +- Stability Level: STABLE + +### `aws_sdk_go_request_retry_count` +The total number of AWS SDK Go retry attempts per request +- Stability Level: STABLE + +### `aws_sdk_go_request_duration_seconds` +Latency of AWS SDK Go requests +- Stability Level: STABLE + +### `aws_sdk_go_request_attempt_total` +The total number of AWS SDK Go request attempts +- Stability Level: STABLE + +### `aws_sdk_go_request_attempt_duration_seconds` +Latency of AWS SDK Go request attempts +- Stability Level: STABLE + +## Leader Election Metrics + +### `leader_election_slowpath_total` +Total number of slow path exercised in renewing leader leases. 'name' is the string used to identify the lease. Please make sure to group by name. +- Stability Level: STABLE + +### `leader_election_master_status` +Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. +- Stability Level: STABLE diff --git a/website/content/en/docs/reference/settings.md b/website/content/en/docs/reference/settings.md index 2ce600fb8e08..a84521d42b34 100644 --- a/website/content/en/docs/reference/settings.md +++ b/website/content/en/docs/reference/settings.md @@ -12,24 +12,24 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf | Environment Variable | CLI Flag | Description | |--|--|--| -| ASSUME_ROLE_ARN | \-\-assume-role-arn | Role to assume for calling AWS services.| -| ASSUME_ROLE_DURATION | \-\-assume-role-duration | Duration of assumed credentials in minutes. Default value is 15 minutes. 
Not used unless aws.assumeRole set. (default = 15m0s)| | BATCH_IDLE_DURATION | \-\-batch-idle-duration | The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods will be batched separately. (default = 1s)| | BATCH_MAX_DURATION | \-\-batch-max-duration | The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one time which usually results in fewer but larger nodes. (default = 10s)| | CLUSTER_CA_BUNDLE | \-\-cluster-ca-bundle | Cluster CA bundle for nodes to use for TLS connections with the API server. If not set, this is taken from the controller's TLS configuration.| | CLUSTER_ENDPOINT | \-\-cluster-endpoint | The external kubernetes cluster endpoint for new nodes to connect with. If not specified, will discover the cluster endpoint using DescribeCluster API.| | CLUSTER_NAME | \-\-cluster-name | [REQUIRED] The kubernetes cluster name for resource discovery.| -| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the admission and validation webhooks| +| DISABLE_LEADER_ELECTION | \-\-disable-leader-election | Disable the leader election client before executing the main loop. Disable when running replicated components for high availability is not desired.| +| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the conversion webhooks| | ENABLE_PROFILING | \-\-enable-profiling | Enable the profiling on the metric endpoint| -| FEATURE_GATES | \-\-feature-gates | Optional features can be enabled / disabled using feature gates. Current options are: Drift,SpotToSpotConsolidation (default = Drift=true,SpotToSpotConsolidation=false)| +| FEATURE_GATES | \-\-feature-gates | Optional features can be enabled / disabled using feature gates. Current options are: SpotToSpotConsolidation (default = SpotToSpotConsolidation=false)| | HEALTH_PROBE_PORT | \-\-health-probe-port | The port the health probe endpoint binds to for reporting controller health (default = 8081)| | INTERRUPTION_QUEUE | \-\-interruption-queue | Interruption queue is the name of the SQS queue used for processing interruption events from EC2. Interruption handling is disabled if not specified. Enabling interruption handling may require additional permissions on the controller service account. Additional permissions are outlined in the docs.| | ISOLATED_VPC | \-\-isolated-vpc | If true, then assume we can't reach AWS services which don't have a VPC endpoint. This also has the effect of disabling look-ups to the AWS on-demand pricing endpoint.| | KARPENTER_SERVICE | \-\-karpenter-service | The Karpenter Service name for the dynamic webhook certificate| | KUBE_CLIENT_BURST | \-\-kube-client-burst | The maximum allowed burst of queries to the kube-apiserver (default = 300)| | KUBE_CLIENT_QPS | \-\-kube-client-qps | The smoothed rate of qps to kube-apiserver (default = 200)| -| LEADER_ELECT | \-\-leader-elect | Start leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.| +| LOG_ERROR_OUTPUT_PATHS | \-\-log-error-output-paths | Optional comma separated paths for logging error output (default = stderr)| | LOG_LEVEL | \-\-log-level | Log verbosity level. 
Can be one of 'debug', 'info', or 'error' (default = info)| +| LOG_OUTPUT_PATHS | \-\-log-output-paths | Optional comma separated paths for directing log output (default = stdout)| | MEMORY_LIMIT | \-\-memory-limit | Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value. (default = -1)| | METRICS_PORT | \-\-metrics-port | The port the metric endpoint binds to for operating metrics about the controller itself (default = 8080)| | RESERVED_ENIS | \-\-reserved-enis | Reserved ENIs are not included in the calculations for max-pods or kube-reserved. This is most often used in the VPC CNI custom networking setup https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html. (default = 0)| @@ -43,11 +43,11 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. -| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|---------|---------| -| Drift | false | Alpha | v0.21.x | v0.32.x | -| Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Beta | v0.34.x | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|--------|---------|---------| +| Drift | false | Alpha | v0.21.x | v0.32.x | +| Drift | true | Beta | v0.33.x | | +| SpotToSpotConsolidation | false | Alpha | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/docs/reference/threat-model.md b/website/content/en/docs/reference/threat-model.md index 63df939b79a6..8625ca478002 100644 --- a/website/content/en/docs/reference/threat-model.md +++ b/website/content/en/docs/reference/threat-model.md @@ -31,11 +31,11 @@ A Cluster Developer has the ability to create pods via `Deployments`, `ReplicaSe Karpenter has permissions to create and manage cloud instances. Karpenter has Kubernetes API permissions to create, update, and remove nodes, as well as evict pods. For a full list of the permissions, see the RBAC rules in the helm chart template. Karpenter also has AWS IAM permissions to create instances with IAM roles. 
-* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/aggregate-clusterrole.yaml) -* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/clusterrole-core.yaml) -* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/clusterrole.yaml) -* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/rolebinding.yaml) -* [role.yaml](https://github.com/aws/karpenter/blob/v0.37.0/charts/karpenter/templates/role.yaml) +* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/aggregate-clusterrole.yaml) +* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole-core.yaml) +* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole.yaml) +* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/rolebinding.yaml) +* [role.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/role.yaml) ## Assumptions diff --git a/website/content/en/docs/tasks/managing-amis.md b/website/content/en/docs/tasks/managing-amis.md index 1ef31f141e08..47d2b3bab9b1 100644 --- a/website/content/en/docs/tasks/managing-amis.md +++ b/website/content/en/docs/tasks/managing-amis.md @@ -58,7 +58,7 @@ For example, you could have: * **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following: ```yaml - apiVersion: karpenter.sh/v1beta1 + apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -66,11 +66,11 @@ For example, you could have: template: spec: nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass name: default --- - apiVersion: karpenter.k8s.aws/v1beta1 + apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: default @@ -120,9 +120,11 @@ You can set Disruption Budgets in a `NodePool` spec. Here is an example: ```yaml +template: + spec: + expireAfter: 1440h disruption: consolidationPolicy: WhenEmpty - expireAfter: 1440h budgets: - nodes: 15% - nodes: "3" @@ -132,7 +134,7 @@ disruption: ``` The `disruption` settings define a few fields that indicate the state of a node that should be disrupted. -The `consolidationPolicy` field indicates that a node should be disrupted if the node is either underutilized (`WhenUnderutilized`) or not running any pods (`WhenEmpty`). +The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`). With `expireAfter` set to `1440` hours, the node expires after 60 days. Extending those values causes longer times without disruption. diff --git a/website/content/en/docs/troubleshooting.md b/website/content/en/docs/troubleshooting.md index 6a5ec49e6a5a..c51d4f341ef5 100644 --- a/website/content/en/docs/troubleshooting.md +++ b/website/content/en/docs/troubleshooting.md @@ -75,12 +75,12 @@ If a long cluster name causes the Karpenter node role name to exceed 64 characte Keep in mind that `KarpenterNodeRole-` is just a recommendation from the getting started guide. 
Instead of using the eksctl role, you can shorten the name to anything you like, as long as it has the right permissions. -### Unknown field in Provisioner spec +### Unknown field in NodePool or EC2NodeClass spec If you are upgrading from an older version of Karpenter, there may have been changes in the CRD between versions. Attempting to utilize newer functionality which is surfaced in newer versions of the CRD may result in the following error message: ``` -error: error validating "STDIN": error validating data: ValidationError(Provisioner.spec): unknown field "" in sh.karpenter.v1alpha5.Provisioner.spec; if you choose to ignore these errors, turn validation off with --validate=false +Error from server (BadRequest): error when creating "STDIN": NodePool in version "v1" cannot be handled as a NodePool: strict decoding error: unknown field "spec.template.spec.nodeClassRef.foo" ``` If you see this error, you can solve the problem by following the [Custom Resource Definition Upgrade Guidance](../upgrade-guide/#custom-resource-definition-crd-upgrades). @@ -91,11 +91,10 @@ Info on whether there has been a change to the CRD between versions of Karpenter `0.16.0` changed the default replicas from 1 to 2. -Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`) +Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/nodepool DoesNotExist requirement`) so it can't provision for the second Karpenter pod. -To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter -(these are instances with the name `karpenter.sh/provisioner-name/`) to run both pods. +To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter to run both pods. To do so on AWS increase the `minimum` and `desired` parameters on the node group autoscaling group to launch at lease 2 instances. @@ -117,7 +116,6 @@ Karpenter `0.26.1` introduced the `karpenter-crd` Helm chart. When installing th - In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run: ```shell -KARPENTER_NAMESPACE=kube-system kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite ``` @@ -145,52 +143,6 @@ You can fix this by patching the node objects: kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{"\n"}' | grep "karpenter.sh/termination" | cut -d ':' -f 1 | xargs kubectl patch node --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' ``` -## Webhooks - -### Failed calling webhook "validation.webhook.provisioners.karpenter.sh" - -If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` - -Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. - -Delete the stale webhooks. 
- -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.provisioners.karpenter.sh -kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.karpenter.sh -``` - -### Failed calling webhook "defaulting.webhook.karpenter.sh" - -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. - -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh -``` - -If you are not able to create a provisioner due to `Error from server (InternalError): error when creating "provisioner.yaml": Internal error occurred: failed calling webhook "defaulting.webhook.karpenter.sh": Post "https://karpenter-webhook.karpenter.svc:443/default-resource?timeout=10s": context deadline exceeded` - -Verify that the karpenter pod is running (should see 2/2 containers with a "Ready" status) - -```text -kubectl get po -A -l app.kubernetes.io/name=karpenter -NAME READY STATUS RESTARTS AGE -karpenter-7b46fb5c-gcr9z 2/2 Running 0 17h -``` - -Karpenter service has endpoints assigned to it - -```text -kubectl get ep -A -l app.kubernetes.io/name=karpenter -NAMESPACE NAME ENDPOINTS AGE -karpenter karpenter 192.168.39.88:8443,192.168.39.88:8080 16d -``` - -Your security groups are not blocking you from reaching your webhook. - -This is especially relevant if you have used `terraform-eks-module` version `>=18` since that version changed its security -approach, and now it's much more restrictive. - ## Provisioning ### Instances with swap volumes fail to register with control plane @@ -202,7 +154,7 @@ Some instance types (c1.medium and m1.small) are given limited amount of memory ``` ##### Solutions -Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the Provisioner requirements to use larger instance types. +Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the NodePool requirements to use larger instance types. ### DaemonSets can result in deployment failures @@ -210,7 +162,7 @@ For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly conside This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. +If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your NodePool to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -225,55 +177,24 @@ This behavior is not unique to Karpenter and can also occur with the standard `k To prevent this, you can set LimitRanges on pod deployments on a per-namespace basis. 
See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-practices/karpenter/#use-limitranges-to-configure-defaults-for-resource-requests-and-limits) for further information on the use of LimitRanges. -### Missing subnetSelector and securityGroupSelector tags causes provisioning failures - -Starting with Karpenter `0.5.5`, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. -The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: - -```text -kind: AWSNodeTemplate -spec: - subnetSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} - securityGroupSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} -``` - -To check your subnet and security group selectors, type the following: - -```bash -aws ec2 describe-subnets --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns subnets matching the selector* - -```bash -aws ec2 describe-security-groups --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns security groups matching the selector* - -Provisioners created without those tags and run in more recent Karpenter versions will fail with this message when you try to run the provisioner: - -```text - field(s): spec.provider.securityGroupSelector, spec.provider.subnetSelector -``` - ### Pods using Security Groups for Pods stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running" -When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). +When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". +This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. +More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). -To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter Provisioner spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). +To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter NodePool spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). 
```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
+apiVersion: karpenter.sh/v1
+kind: NodePool
 metadata:
   name: default
 spec:
-  labels:
-    vpc.amazonaws.com/has-trunk-attached: "false"
-  ttlSecondsAfterEmpty: 30
+  template:
+    metadata:
+      labels:
+        vpc.amazonaws.com/has-trunk-attached: "false"
```

### Pods using PVCs can hit volume limits and fail to scale-up

@@ -306,7 +227,7 @@ The following is a list of known CSI drivers which support a startupTaint to eli
These taints should be configured via `startupTaints` on your `NodePool`. For example, to enable this for EBS, add the following to your `NodePool`:

```yaml
-apiVersion: karpenter.sh/v1beta1
+apiVersion: karpenter.sh/v1
 kind: NodePool
 spec:
   template:
@@ -330,7 +251,7 @@ time=2023-06-12T19:18:15Z type=Warning reason=FailedCreatePodSandBox from=kubele

By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI.
See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more detailed information on these instance types' limits.

-If the max-pods (configured through your NodePool [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state.
+If the max-pods (configured through your EC2NodeClass [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state.
If you've enabled [Security Groups per Pod](https://aws.github.io/aws-eks-best-practices/networking/sgpp/), one of the instance's ENIs is reserved as the trunk interface and uses branch interfaces off of that trunk interface to assign different security groups.
If you do not have any `SecurityGroupPolicies` configured for your pods, they will be unable to utilize branch interfaces attached to the trunk interface, and IPs will only be available from the non-trunk ENIs.
@@ -342,19 +263,19 @@ Note that Karpenter is not aware if [Security Groups per Pod](https://aws.github

To avoid this discrepancy between `maxPods` and the supported pod density of the EC2 instance based on ENIs and allocatable IPs, you can perform one of the following actions on your cluster:

1. Enable [Prefix Delegation](https://www.eksworkshop.com/docs/networking/prefix/) to increase the number of allocatable IPs for the ENIs on each instance type
-2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your Provisioner
-3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs.
+2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your NodePools (see the sketch below)
+3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs.

-For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}).
+For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}).
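As a minimal sketch of the second option above (this example is illustrative and not part of the upstream chart or docs; the resource name, role, cluster tag, and `maxPods` value are placeholders), pod density in v1 is capped through the `kubelet` block on the `EC2NodeClass`, and every NodePool that references that class inherits the same cap:

```yaml
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
  name: capped-pod-density                    # hypothetical name
spec:
  amiSelectorTerms:
    - alias: al2023@latest
  role: KarpenterNodeRole-my-cluster          # placeholder IAM role
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: my-cluster    # placeholder cluster name
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: my-cluster
  kubelet:
    # Keep maxPods at or below the ENI/IP-based pod density supported by the
    # smallest instance types your NodePools are allowed to launch.
    maxPods: 58
```

Because `kubelet` settings live on the `EC2NodeClass` in v1, lowering `maxPods` here affects every NodePool that references this class, so check each NodePool's instance-type requirements against the value you choose.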
#### IP exhaustion in a subnet -When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`AWSNodeTemplate`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. +When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`EC2NodeClass`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. ##### Solutions 1. Use `topologySpreadConstraints` on `topology.kubernetes.io/zone` to spread your pods and nodes more evenly across zones -2. Increase the IP address space (CIDR) for the subnets selected by your `AWSNodeTemplate` +2. Increase the IP address space (CIDR) for the subnets selected by your `EC2NodeClass` 3. Use [custom networking](https://www.eksworkshop.com/docs/networking/custom-networking/) to assign separate IP address spaces to your pods and your nodes 4. [Run your EKS cluster on IPv6](https://aws.github.io/aws-eks-best-practices/networking/ipv6/) (Note: IPv6 clusters have some known limitations which should be well-understood before choosing to use one) @@ -480,7 +401,7 @@ Karpenter determines node initialization using three factors: 1. Node readiness 2. Expected resources are registered -3. Provisioner startup taints are removed +3. NodePool startup taints are removed #### Node Readiness @@ -497,9 +418,9 @@ Common resources that don't register and leave nodes in a non-initialized state: 1. `nvidia.com/gpu` (or any gpu-based resource): A GPU instance type that supports the `nvidia.com/gpu` resource is launched but the daemon/daemonset to register the resource on the node doesn't exist 2. `vpc.amazonaws.com/pod-eni`: An instance type is launched by the `ENABLE_POD_ENI` value is set to `false` in the `vpc-cni` plugin. Karpenter will expect that the `vpc.amazonaws.com/pod-eni` will be registered, but it never will. -#### Provisioner startup taints are removed +#### NodePool startup taints are removed -Karpenter expects all startup taints specified in `.spec.startupTaints` of the provisioner to be completely removed from node `.spec.taints` before it will consider the node initialized. +Karpenter expects all startup taints specified in `.spec.template.spec.startupTaints` of the NodePool to be completely removed from node `.spec.taints` before it will consider the node initialized. 
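As an abbreviated sketch (the NodePool name and `nodeClassRef` target are placeholders, and other NodePool fields are omitted), a startup taint is declared under `.spec.template.spec.startupTaints`; the EBS CSI driver's node agent mentioned earlier is one component that removes its taint once it is ready:

```yaml
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      startupTaints:
        # Karpenter waits until this taint is gone from the node's .spec.taints
        # before it considers the node initialized.
        - key: ebs.csi.aws.com/agent-not-ready
          effect: NoExecute
      nodeClassRef:
        group: karpenter.k8s.aws
        kind: EC2NodeClass
        name: default
```

If the component responsible for a startup taint never schedules onto the node and removes it, the node will remain uninitialized indefinitely.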
### Node NotReady @@ -514,7 +435,7 @@ The easiest way to start debugging is to connect to the instance and get the Kub ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -527,7 +448,7 @@ For Bottlerocket, you'll need to get access to the root filesystem: ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -614,7 +535,7 @@ This means that your CNI plugin is out of date. You can find instructions on how ### Node terminates before ready on failed encrypted EBS volume If you are using a custom launch template and an encrypted EBS volume, the IAM principal launching the node may not have sufficient permissions to use the KMS customer managed key (CMK) for the EC2 EBS root volume. -This issue also applies to [Block Device Mappings]({{}}) specified in the Provisioner. +This issue also applies to [Block Device Mappings]({{}}) specified in the EC2NodeClass. In either case, this results in the node terminating almost immediately upon creation. Keep in mind that it is possible that EBS Encryption can be enabled without your knowledge. diff --git a/website/content/en/docs/upgrading/compatibility.md b/website/content/en/docs/upgrading/compatibility.md index 2ccb7d2c9d1e..01d6b0541c37 100644 --- a/website/content/en/docs/upgrading/compatibility.md +++ b/website/content/en/docs/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | -|------------|----------|----------|----------|----------|----------|----------|--------| -| karpenter | \>= 0.21 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34 | 0.37.0 | +| KUBERNETES | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | +|------------|---------------------|----------|----------|----------|----------|----------|------------| +| karpenter | \>= 0.21 \<= 0.37 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34 | \>= 0.37 | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) diff --git a/website/content/en/docs/upgrading/upgrade-guide.md b/website/content/en/docs/upgrading/upgrade-guide.md index 0c4624b652d4..a4dcaf3ad1c1 100644 --- a/website/content/en/docs/upgrading/upgrade-guide.md +++ b/website/content/en/docs/upgrading/upgrade-guide.md @@ -10,6 +10,10 @@ Karpenter is a controller that runs in your cluster, but it is not tied to a spe Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes and keep Karpenter up to date on bug fixes and new features. This guide contains information needed to upgrade to the latest release of Karpenter, along with compatibility issues you need to be aware of when upgrading from earlier Karpenter versions. +{{% alert title="Warning" color="warning" %}} +With the release of Karpenter v1.0.0, the Karpenter team will be dropping support for karpenter versions v0.32 and below. 
We recommend upgrading to the latest version of Karpenter and keeping Karpenter up-to-date for bug fixes and new features. +{{% /alert %}} + ### CRD Upgrades Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published: @@ -20,23 +24,61 @@ Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are pu ``` {{% alert title="Note" color="warning" %}} -If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} * As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. -In general, you can reapply the CRDs in the `crds` directory of the Karpenter Helm chart: - -```shell -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.37.0/pkg/apis/crds/karpenter.sh_nodepools.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.37.0/pkg/apis/crds/karpenter.sh_nodeclaims.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.37.0/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml -``` - +### Upgrading to `1.0.0`+ + +{{% alert title="Warning" color="warning" %}} +Karpenter `1.0.0` introduces v1 APIs, including _significant_ changes to the API and upgrade procedures for the Karpenter controllers. **Do not** upgrade to `1.0.0`+ without referencing the [v1 Migration Upgrade Procedure]({{}}). + +This version adds [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to automatically pull the v1 API version of previously applied v1beta1 NodePools, EC2NodeClasses, and NodeClaims. Karpenter will stop serving the v1beta1 API version at v1.1.0 and will drop the conversion webhooks at that time. Migrate all stored manifests to v1 API versions on Karpenter v1.0+. +{{% /alert %}} + +Below is the full changelog for v1, copied from the [v1 Migration Upgrade Procedure]({{}}). + +* Features: + * AMI Selector Terms has a new Alias field which can only be set by itself in `EC2NodeClass.Spec.AMISelectorTerms` + * Disruption Budgets by Reason was added to `NodePool.Spec.Disruption.Budgets` + * TerminationGracePeriod was added to `NodePool.Spec.Template.Spec`. + * LOG_OUTPUT_PATHS and LOG_ERROR_OUTPUT_PATHS environment variables added +* API Rename: NodePool’s ConsolidationPolicy `WhenUnderutilized` is now renamed to `WhenEmptyOrUnderutilized` +* Behavior Changes: + * Expiration is now forceful and begins draining as soon as it’s expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining. 
+  * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields).
+  * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes.
+  * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up.
+* API Moves:
+  * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field.
+  * `Kubelet` was moved to the EC2NodeClass from the NodePool.
+* RBAC changes: added `delete pods` | added `get, patch crds` | added `update nodes` | removed `create nodes`
+* Breaking API (Manual Migration Needed):
+  * Ubuntu is dropped as a first class supported AMI Family
+  * `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict` (annotation), and `karpenter.sh/managed-by` (tag) are all removed. `karpenter.sh/managed-by`, which currently stores the cluster name in its value, will be replaced by `eks:eks-cluster-name`
+  * The taint used to mark nodes for disruption and termination changed from `karpenter.sh/disruption=disrupting:NoSchedule` to `karpenter.sh/disrupted:NoSchedule`. It is not recommended to tolerate this taint; however, if you were tolerating it in your applications, you'll need to adjust your taints to reflect this change.
+* Environment Variable Changes:
+  * LOGGING_CONFIG, ASSUME_ROLE_ARN, ASSUME_ROLE_DURATION Dropped
+  * LEADER_ELECT renamed to DISABLE_LEADER_ELECTION
+  * `FEATURE_GATES.DRIFT=true` was dropped and promoted to Stable, and cannot be disabled.
+    * Users who currently opt out of drift by disabling the drift feature flag will no longer be able to do so.
+* Defaults changed:
+  * API: Karpenter will drop support for IMDS access from containers by default on new EC2NodeClasses by updating the default of `httpPutResponseHopLimit` from 2 to 1.
+  * API: ConsolidateAfter is now required. It could not previously be set with `ConsolidationPolicy: WhenUnderutilized`; set it to 0 to keep the same behavior as in v1beta1.
+  * API: All `NodeClassRef` fields are now required, and `apiVersion` has been renamed to `group`
+  * API: AMISelectorTerms are required. Setting an Alias cannot be done with any other type of term, and must match the AMI Family that's set or be Custom.
+  * Helm: The Deployment spec's TopologySpreadConstraint now requires zonal spread instead of preferring it.
Users who had one node running their Karpenter deployments need to either: + * Have two nodes in different zones to ensure both Karpenter replicas schedule + * Scale down their Karpenter replicas from 2 to 1 in the helm chart + * Edit and relax the topology spread constraint in their helm chart from DoNotSchedule to ScheduleAnyway + * Helm/Binary: `controller.METRICS_PORT` default changed back to 8080 + ### Upgrading to `0.37.0`+ {{% alert title="Warning" color="warning" %}} diff --git a/website/content/en/docs/upgrading/v1-migration.md b/website/content/en/docs/upgrading/v1-migration.md new file mode 100644 index 000000000000..69930b2ea9c1 --- /dev/null +++ b/website/content/en/docs/upgrading/v1-migration.md @@ -0,0 +1,428 @@ +--- +title: "v1 Migration" +linkTitle: "v1 Migration" +weight: 30 +description: > + Upgrade information for migrating to v1 +--- + +This migration guide is designed to help you migrate Karpenter from v1beta1 APIs to v1 (v0.33-v0.37). +Use this document as a reference to the changes that were introduced in this release and as a guide to how you need to update the manifests and other Karpenter objects you created in previous Karpenter releases. + +Before you begin upgrading to `v1.0.0`, you should know that: + +* Every Karpenter upgrade from pre-v1.0.0 versions must upgrade to minor version `v1.0.0`. +* You must be upgrading to `v1.0.0` from a version of Karpenter that only supports v1beta1 APIs, e.g. NodePools, NodeClaims, and NodeClasses (v0.33+). +* Karpenter `v1.0.0`+ supports Karpenter v1 and v1beta1 APIs and will not work with earlier Provisioner, AWSNodeTemplate or Machine v1alpha1 APIs. Do not upgrade to `v1.0.0`+ without first [upgrading to `0.32.x`]({{}}) or later and then upgrading to v0.33. +* Version `v1.0.0` adds [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to automatically pull the v1 API version of previously applied v1beta1 NodePools, EC2NodeClasses, and NodeClaims. Karpenter will stop serving the v1beta1 API version at v1.1.0 and will drop the conversion webhooks at that time. You will need to migrate all stored manifests to v1 API versions on Karpenter v1.0+. Keep in mind that this is a conversion and not dual support, which means that resources are updated in-place rather than migrated over from the previous version. +* If you need to rollback the upgrade to v1, you need to upgrade to a special patch version of the minor version you came from. For instance, if you came from v0.33.5, you'll need to downgrade back to v0.33.6. More details on how to do this in [Downgrading]({{}}). +* Validate that you are running at least Kubernetes 1.25. Use the [compatibility matrix]({{}}) to confirm you are on a supported Kubernetes version. +* Karpenter runs a helm post-install-hook as part of upgrading to and from v1.0.0. If you're running Karpenter on a non x86_64 node, you'll need to update your `values.postInstallHook.image` values in your helm `values.yaml` file to point to a compatible image with kubectl. For instance, [an ARM compatible version](https://hub.docker.com/layers/bitnami/kubectl/1.30/images/sha256-d63c6609dd5c336fd036bd303fd4ce5f272e73ddd1923d32c12d62b7149067ed?context=explore). + +See the [Changelog]({{}}) for details about actions you should take before upgrading to v1.0 or v1.1. + +## Upgrade Procedure + +Please read through the entire procedure before beginning the upgrade. 
There are major changes in this upgrade, so please evaluate the list of breaking changes before continuing.
+
+{{% alert title="Note" color="warning" %}}
+This guide first requires you to upgrade to the latest patch release of your current minor version before upgrading to v1.0.0. This allows the conversion webhooks to operate and minimizes downtime of the Karpenter controller when requesting the Karpenter custom resources.
+{{% /alert %}}
+
+1. Set environment variables for your cluster to upgrade to the latest patch version of the Karpenter version you're currently running:
+
+    ```bash
+    export KARPENTER_NAMESPACE=kube-system
+    export AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov
+    export CLUSTER_NAME="${USER}-karpenter-demo"
+    export AWS_REGION="us-west-2"
+    export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
+    export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"
+    ```
+
+
+2. Determine the current Karpenter version:
+   ```bash
+   kubectl get pod -A | grep karpenter
+   kubectl describe pod -n "${KARPENTER_NAMESPACE}" karpenter-xxxxxxxxxx-xxxxx | grep Image:
+   ```
+   Sample output:
+   ```bash
+   Image: public.ecr.aws/karpenter/controller:0.37.1@sha256:157f478f5db1fe999f5e2d27badcc742bf51cc470508b3cebe78224d0947674f
+   ```
+
+   The Karpenter version you are running must be between minor version `v0.33` and `v0.37`. To be able to roll back from Karpenter v1, you must roll back to one of the following patch release versions for your minor version, which include the conversion webhooks for a smooth rollback:
+
+   * v0.37.1
+   * v0.36.3
+   * v0.35.6
+   * v0.34.7
+   * v0.33.6
+
+3. Review for breaking changes between v0.33 and v0.37: If you are already running Karpenter v0.37.x, you can skip this step. If you are running an earlier Karpenter version, you need to review the [Upgrade Guide]({{}}) for each minor release.
+
+4. Set environment variables for upgrading to the latest patch version. Note that `v0.33.6` and `v0.34.7` both need to include the v prefix, whereas `v0.35+` should not.
+
+    ```bash
+    export KARPENTER_VERSION=
+    ```
+
+6. Apply the latest patch version of your current minor version's Custom Resource Definitions (CRDs):
+
+    ```bash
+    helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+        --set webhook.enabled=true \
+        --set webhook.serviceName=karpenter \
+        --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \
+        --set webhook.port=8443
+    ```
+
+
+7. Upgrade Karpenter to the latest patch version of your current minor version. At the end of this step, conversion webhooks will run but will not convert any version.
+
+    ```bash
+    # Service account annotation can be dropped when using pod identity
+    helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+      --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \
+      --set settings.clusterName=${CLUSTER_NAME} \
+      --set settings.interruptionQueue=${CLUSTER_NAME} \
+      --set controller.resources.requests.cpu=1 \
+      --set controller.resources.requests.memory=1Gi \
+      --set controller.resources.limits.cpu=1 \
+      --set controller.resources.limits.memory=1Gi \
+      --set webhook.enabled=true \
+      --set webhook.port=8443 \
+      --wait
+    ```
+
+8. 
Set environment variables for first upgrading to v1.0.0 + + ```bash + export KARPENTER_VERSION=1.0.0 + ``` + + +9. Update your existing policy using the following to the v1.0.0 controller policy: + Notable Changes to the IAM Policy include additional tag-scoping for the `eks:eks-cluster-name` tag for instances and instance profiles. + + ```bash + TEMPOUT=$(mktemp) + curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ + && aws cloudformation deploy \ + --stack-name "Karpenter-${CLUSTER_NAME}" \ + --template-file "${TEMPOUT}" \ + --capabilities CAPABILITY_NAMED_IAM \ + --parameter-overrides "ClusterName=${CLUSTER_NAME}" + ``` + +10. Apply the v1.0.0 Custom Resource Definitions (CRDs): + + ```bash + helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ + --set webhook.enabled=true \ + --set webhook.serviceName=karpenter \ + --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \ + --set webhook.port=8443 + ``` + +11. Upgrade Karpenter to the new version. At the end of this step, conversion webhooks run to convert the Karpenter CRDs to v1. + + ```bash + # Service account annotion can be dropped when using pod identity + helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ + --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ + --set settings.clusterName=${CLUSTER_NAME} \ + --set settings.interruptionQueue=${CLUSTER_NAME} \ + --set controller.resources.requests.cpu=1 \ + --set controller.resources.requests.memory=1Gi \ + --set controller.resources.limits.cpu=1 \ + --set controller.resources.limits.memory=1Gi \ + --wait + ``` + + {{% alert title="Note" color="warning" %}} + Karpenter has deprecated and moved a number of Helm values as part of the v1 release. Ensure that you upgrade to the newer version of these helm values during your migration to v1. You can find detail for all the settings that were moved in the [v1 Upgrade Reference]({{}}). + {{% /alert %}} + +12. Once upgraded, you won't need to roll your nodes to be compatible with v1.1.0, except if you have multiple NodePools with different `kubelet`s that are referencing the same EC2NodeClass. Karpenter has moved the `kubelet` to the EC2NodeClass in v1. NodePools with different `kubelet` referencing the same EC2NodeClass will be compatible with v1.0.0, but will not be in v1.1.0. + +When you have completed the migration to `1.0.0` CRDs, Karpenter will be able to serve both the `v1beta1` versions and the `v1` versions of NodePools, NodeClaims, and EC2NodeClasses. +The results of upgrading these CRDs include the following: + +* The storage version of these resources change to v1. After the upgrade, Karpenter starts converting these resources to v1 storage versions in real time. Users should experience no differences from this change. +* You are still able to GET and make updates using the v1beta1 versions, by for example doing `kubectl get nodepools.v1beta1.karpenter.sh`. + + +## Post upgrade considerations + +Your NodePool and EC2NodeClass objects are auto-converted to the new v1 storage version during the upgrade. 
Consider getting the latest versions of those objects to update any stored manifests where you were previously applying the v1beta1 version. + + * [NodePools]({{}}): Get the latest copy of your NodePool (`kubectl get nodepool default -o yaml > nodepool.yaml`) and review the [Changelog]({{}}) for changes to NodePool objects. Make modifications as needed. + * [EC2NodeClasses]({{}}): Get the latest copy of your EC2NodeClass (`kubectl get ec2nodeclass default -o yaml > ec2nodeclass.yaml`) and review the [Changelog]({{}}) for changes to EC2NodeClass objects. Make modifications as needed. + +When you are satisfied with your NodePool and EC2NodeClass files, apply them as follows: + +```bash +kubectl apply -f nodepool.yaml +kubectl apply -f ec2nodeclass.yaml +``` + +## Changelog +Refer to the [Full Changelog]({{}}) for more. + +Because Karpenter `v1.0.0` will run both `v1` and `v1beta1` versions of NodePools and EC2NodeClasses, you don't immediately have to upgrade the stored manifests that you have to v1. +However, in preparation for later Karpenter upgrades (which will not support `v1beta1`, review the following changes from v1beta1 to v1. + +Karpenter `v1.0.0` changes are divided into two different categories: those you must do before `1.0.0` upgrades and those you must do before `1.1.0` upgrades. + +### Changes required before upgrading to `v1.0.0` + +Apply the following changes to your NodePools and EC2NodeClasses, as appropriate, before upgrading them to v1. + +* **Deprecated annotations, labels and tags are removed for v1.0.0**: For v1, `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict +(annotation)`, and `karpenter.sh/managed-by` (tag) all have support removed. +The `karpenter.sh/managed-by`, which currently stores the cluster name in its value, is replaced by `eks:eks-cluster-name`, to allow +for [EKS Pod Identity ABAC policies](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-abac.html). + +* **Zap logging config removed**: Support for setting the Zap logging config was deprecated in beta and is now removed for v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. + +* **metadataOptions could break workloads**: If you have workload pods that are not using `hostNetworking`, the updated default `metadataOptions` could cause your containers to break when you apply new EC2NodeClasses on v1. + +* **Ubuntu AMIFamily Removed**: + + Support for automatic AMI selection and UserData generation for Ubuntu has been dropped with Karpenter `v1.0.0`. + To continue using Ubuntu AMIs you will need to specify an AMI using `amiSelectorTerms`. + + UserData generation can be achieved using the AL2 AMIFamily which has an identical UserData format. + However, compatibility is not guaranteed long-term and changes to either AL2 or Ubuntu's UserData format may introduce incompatibilities. + If this occurs, the Custom AMIFamily should be used for Ubuntu and UserData will need to be entirely maintained by the user. + + If you are upgrading to `v1.0.0` and already have v1beta1 Ubuntu EC2NodeClasses, all you need to do is specify `amiSelectorTerms` and Karpenter will translate your NodeClasses to the v1 equivalent (as shown below). + Failure to specify `amiSelectorTerms` will result in the EC2NodeClass and all referencing NodePools to show as NotReady, causing Karpenter to ignore these NodePools and EC2NodeClasses for Provisioning and Drift. 
+
+  ```yaml
+  # Original v1beta1 EC2NodeClass
+  apiVersion: karpenter.k8s.aws/v1beta1
+  kind: EC2NodeClass
+  spec:
+    amiFamily: Ubuntu
+    amiSelectorTerms:
+      - id: ami-foo
+  ---
+  # Conversion Webhook Output
+  apiVersion: karpenter.k8s.aws/v1
+  kind: EC2NodeClass
+  metadata:
+    annotations:
+      compatibility.karpenter.k8s.aws/v1beta1-ubuntu: amiFamily,blockDeviceMappings
+  spec:
+    amiFamily: AL2
+    amiSelectorTerms:
+      - id: ami-foo
+    blockDeviceMappings:
+      - deviceName: '/dev/sda1'
+        rootVolume: true
+        ebs:
+          encrypted: true
+          volumeType: gp3
+          volumeSize: 20Gi
+  ```
+
+* **amiSelectorTerms and amiFamily**: For v1, `amiFamily` is no longer required if you instead specify an `alias` in `amiSelectorTerms` in your `EC2NodeClass`. You need to update your `amiSelectorTerms` and `amiFamily` if you are using:
+  * A Custom amiFamily. You must ensure that you add the `karpenter.sh/unregistered:NoExecute` taint to the node in your UserData.
+  * An Ubuntu AMI, as described earlier.
+
+### Before upgrading to `v1.1.0`
+
+Apply the following changes to your NodePools and EC2NodeClasses, as appropriate, before upgrading them to `v1.1.0` (though it is okay to make these changes on `1.0.0`).
+
+* **v1beta1 support gone**: In `v1.1.0`, v1beta1 is not supported. So you need to:
+  * Migrate all Karpenter yaml files ([NodePools]({{}}), [EC2NodeClasses]({{}})) to v1.
+  * Know that all resources in the cluster also need to be on v1. It's possible (although unlikely) that some resources may still be stored as v1beta1 in ETCD if no writes had been made to them since the v1 upgrade. You could use a tool such as [kube-storage-version-migrator](https://github.com/kubernetes-sigs/kube-storage-version-migrator) to handle this.
+  * Know that you cannot roll back to v1beta1 once you have upgraded to `v1.1.0`.
+
+* **Kubelet Configuration**: If you have multiple NodePools pointing to the same EC2NodeClass that have different kubeletConfigurations,
+then you have to manually add more EC2NodeClasses and point their NodePools to them. This will induce drift and you will have to roll your cluster.
+If you have multiple NodePools pointing to the same EC2NodeClass, but they have the same configuration, then you can proceed with the migration
+without having drift or having any additional NodePools or EC2NodeClasses configured.
+
+* **Remove kubelet annotation from NodePools**: During the upgrade process Karpenter will rely on the `compatibility.karpenter.sh/v1beta1-kubelet-conversion` annotation to determine whether to use the v1beta1 NodePool kubelet configuration or the v1 EC2NodeClass kubelet configuration. The `compatibility.karpenter.sh/v1beta1-kubelet-conversion` NodePool annotation takes precedence over the EC2NodeClass Kubelet configuration when launching nodes. Remove the kubelet-configuration annotation (`compatibility.karpenter.sh/v1beta1-kubelet-conversion`) from your NodePools once you have migrated kubelet from the NodePool to the EC2NodeClass.
+
+Keep in mind that rollback, without replacing the Karpenter nodes, will not be supported to an earlier version of Karpenter once the annotation is removed. This annotation is only used to support the kubelet configuration migration path, but will not be supported in v1.1.
+
+### Downgrading
+
+Once the Karpenter CRDs are upgraded to v1, conversion webhooks are needed to help convert APIs that are stored in etcd from v1 to v1beta1. Changes to the CRDs will also need to include at least the latest version of the CRD, in this case v1. 
The patch versions of the v1beta1 Karpenter controller that include the conversion wehooks include: + +* v0.37.1 +* v0.36.3 +* v0.35.6 +* v0.34.7 +* v0.33.6 + +{{% alert title="Note" color="warning" %}} +When rolling back from v1, Karpenter will not retain data that was only valid in v1 APIs. For instance, if you were upgrading from v0.33.5 to v1, updated the `NodePool.Spec.Disruption.Budgets` field and then rolled back to v0.33.6, Karpenter would not retain the `NodePool.Spec.Disruption.Budgets` field, as that was introduced in v0.34.x. If you are configuring the kubelet field, and have removed the `compatibility.karpenter.sh/v1beta1-kubelet-conversion` annotation, rollback is not supported without replacing your nodes between EC2NodeClass and NodePool. +{{% /alert %}} + +{{% alert title="Note" color="warning" %}} +Since both v1beta1 and v1 will be served, `kubectl` will default to returning the `v1` version of your CRDs. To interact with the v1beta1 version of your CRDs, you'll need to add the full resource path (including api version) into `kubectl` calls. For example: `k get nodeclaim.v1beta1.karpenter.sh` +{{% /alert %}} + +1. Set environment variables + +```bash +export KARPENTER_NAMESPACE="kube-system" +# Note: v0.33.6 and v0.34.7 include the v prefix, omit it for versions v0.35+ +export KARPENTER_VERSION="" +export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" +export CLUSTER_NAME="" +export TEMPOUT="$(mktemp)" +``` + +{{% alert title="Warning" color="warning" %}} +If you open a new shell to run steps in this procedure, you need to set some or all of the environment variables again. +To remind yourself of these values, type: + +```bash +echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${CLUSTER_NAME}" "${TEMPOUT}" +``` + +{{% /alert %}} + +2. Rollback the Karpenter Policy + +**v0.33.6 and v0.34.7:** +```bash +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ + && aws cloudformation deploy \ + --stack-name "Karpenter-${CLUSTER_NAME}" \ + --template-file "${TEMPOUT}" \ + --capabilities CAPABILITY_NAMED_IAM \ + --parameter-overrides "ClusterName=${CLUSTER_NAME}" +``` + +**v0.35+:** +```bash +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ + && aws cloudformation deploy \ + --stack-name "Karpenter-${CLUSTER_NAME}" \ + --template-file "${TEMPOUT}" \ + --capabilities CAPABILITY_NAMED_IAM \ + --parameter-overrides "ClusterName=${CLUSTER_NAME}" +``` + +3. Rollback the CRDs + +```bash +helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ + --set webhook.enabled=true \ + --set webhook.serviceName=karpenter \ + --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \ + --set webhook.port=8443 +``` + +4. 
Rollback the Karpenter Controller + +```bash +# Service account annotation can be dropped when using pod identity +helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ + --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ + --set settings.clusterName=${CLUSTER_NAME} \ + --set settings.interruptionQueue=${CLUSTER_NAME} \ + --set controller.resources.requests.cpu=1 \ + --set controller.resources.requests.memory=1Gi \ + --set controller.resources.limits.cpu=1 \ + --set controller.resources.limits.memory=1Gi \ + --set webhook.enabled=true \ + --set webhook.port=8443 \ + --wait +``` + +Karpenter should now be pulling and operating against the v1beta1 APIVersion as it was prior to the upgrade + +## Full Changelog +* Features: + * AMI Selector Terms has a new Alias field which can only be set by itself in `EC2NodeClass.Spec.AMISelectorTerms` + * Disruption Budgets by Reason was added to `NodePool.Spec.Disruption.Budgets` + * TerminationGracePeriod was added to `NodePool.Spec.Template.Spec`. + * LOG_OUTPUT_PATHS and LOG_ERROR_OUTPUT_PATHS environment variables added +* API Rename: NodePool’s ConsolidationPolicy `WhenUnderutilized` is now renamed to `WhenEmptyOrUnderutilized` +* Behavior Changes: + * Expiration is now forceful and begins draining as soon as it’s expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining. + * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields). + * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes. + * Discovered standard AL2023 AMIs will no longer be considered compatible with GPU / accelerator workloads. If you're using an AL2023 EC2NodeClass (without AMISelectorTerms) for these workloads, you will need to select your AMI via AMISelectorTerms (non-alias). + * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up. +* API Moves: + * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field. + * `Kubelet` was moved to the EC2NodeClass from the NodePool. +* RBAC changes: added `delete pods` | added `get, patch crds` | added `update nodes` | removed `create nodes` +* Breaking API (Manual Migration Needed): + * Ubuntu is dropped as a first class supported AMI Family + * `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict` (annotation), and `karpenter.sh/managed-by` (tag) are all removed. `karpenter.sh/managed-by`, which currently stores the cluster name in its value, will be replaced by eks:eks-cluster-name + * The taint used to mark nodes for disruption and termination changed from `karpenter.sh/disruption=disrupting:NoSchedule` to `karpenter.sh/disrupted:NoSchedule`. 
Tolerating this taint is not recommended; however, if your applications were tolerating the old taint, you'll need to update their tolerations to use the new one.
+* Environment Variable Changes:
+  * `LOGGING_CONFIG`, `ASSUME_ROLE_ARN`, and `ASSUME_ROLE_DURATION` were dropped
+  * `LEADER_ELECT` was renamed to `DISABLE_LEADER_ELECTION`
+  * `FEATURE_GATES.DRIFT=true` was dropped; drift was promoted to Stable and can no longer be disabled.
+    * Users currently opting out of drift by disabling the feature flag will no longer be able to do so.
+* Defaults changed:
+  * API: Karpenter will drop support for IMDS access from containers by default on new EC2NodeClasses by updating the default of `httpPutResponseHopLimit` from 2 to 1.
+  * API: ConsolidateAfter is now required. Previously it couldn’t be set with `ConsolidationPolicy: WhenUnderutilized`; it now must be. Set it to 0 to keep the same behavior as in v1beta1.
+  * API: All `NodeClassRef` fields are now required, and `apiVersion` has been renamed to `group`.
+  * API: AMISelectorTerms are required. Setting an Alias cannot be combined with any other type of term, and must match the AMI Family that's set or be Custom.
+  * Helm: The Deployment's TopologySpreadConstraint now requires zonal spread (`DoNotSchedule`) instead of preferring it. Users who had one node running their Karpenter deployments need to either:
+    * Have two nodes in different zones to ensure both Karpenter replicas schedule
+    * Scale down their Karpenter replicas from 2 to 1 in the helm chart
+    * Edit and relax the topology spread constraint in their helm chart from DoNotSchedule to ScheduleAnyway
+  * Helm/Binary: `controller.METRICS_PORT` default changed back to 8080
+
+### Updated metrics
+
+Changes to Karpenter metrics from v1beta1 to v1 are shown in the following tables.
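+
+Dashboards, recording rules, and alerts that reference the old names should be updated alongside the upgrade. As a quick spot check (a sketch only; it assumes `curl` and `jq` are available and that a Prometheus server scraping Karpenter is reachable at `${PROMETHEUS_URL}`, for example via `kubectl port-forward` to your Prometheus service), you can confirm that a renamed series is being scraped:
+
+```bash
+# Query one of the renamed series; a non-empty result means the v1 metric names are live.
+curl -sG "${PROMETHEUS_URL}/api/v1/query" \
+  --data-urlencode 'query=karpenter_nodes_terminated_total' | jq '.data.result | length'
+```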
+ +This table shows metrics names that changed from v1beta1 to v1: + +| Metric type | v1beta1 metrics name | new v1 metrics name | +|--|--|--| +| Node | karpenter_nodes_termination_time_seconds | karpenter_nodes_termination_duration_seconds | +| Node | karpenter_nodes_terminated | karpenter_nodes_terminated_total | +| Node | karpenter_nodes_leases_deleted | karpenter_nodes_leases_deleted_total | +| Node | karpenter_nodes_created | karpenter_nodes_created_total | +| Pod | karpenter_pods_startup_time_seconds | karpenter_pods_startup_duration_seconds | +| Disruption | karpenter_disruption_replacement_nodeclaim_failures_total | karpenter_voluntary_disruption_queue_failures_total | +| Disruption | karpenter_disruption_evaluation_duration_seconds | karpenter_voluntary_disruption_decision_evaluation_duration_seconds | +| Disruption | karpenter_disruption_eligible_nodes | karpenter_voluntary_disruption_eligible_nodes | +| Disruption | karpenter_disruption_consolidation_timeouts_total | karpenter_voluntary_disruption_consolidation_timeouts_total | +| Disruption | karpenter_disruption_budgets_allowed_disruptions | karpenter_nodepools_allowed_disruptions | +| Disruption | karpenter_disruption_actions_performed_total | karpenter_voluntary_disruption_decisions_total | +| Provisioner | karpenter_provisioner_scheduling_simulation_duration_seconds | karpenter_scheduler_scheduling_duration_seconds | +| Provisioner | karpenter_provisioner_scheduling_queue_depth | karpenter_scheduler_queue_depth | +| Interruption | karpenter_interruption_received_messages | karpenter_interruption_received_messages_total | +| Interruption | karpenter_interruption_deleted_messages | karpenter_interruption_deleted_messages_total | +| Interruption | karpenter_interruption_message_latency_time_seconds | karpenter_interruption_message_queue_duration_seconds | +| NodePool | karpenter_nodepool_usage | karpenter_nodepools_usage | +| NodePool | karpenter_nodepool_limit | karpenter_nodepools_limit | +| NodeClaim | karpenter_nodeclaims_terminated | karpenter_nodeclaims_terminated_total | +| NodeClaim | karpenter_nodeclaims_disrupted | karpenter_nodeclaims_disrupted_total | +| NodeClaim | karpenter_nodeclaims_created | karpenter_nodeclaims_created_total | + +This table shows v1beta1 metrics that were dropped for v1: + +| Metric type | Metric dropped for v1 | +|--|--| +| Disruption | karpenter_disruption_replacement_nodeclaim_initialized_seconds | +| Disruption | karpenter_disruption_queue_depth | +| Disruption | karpenter_disruption_pods_disrupted_total | +| | karpenter_consistency_errors | +| NodeClaim | karpenter_nodeclaims_registered | +| NodeClaim | karpenter_nodeclaims_launched | +| NodeClaim | karpenter_nodeclaims_initialized | +| NodeClaim | karpenter_nodeclaims_drifted | +| Provisioner | karpenter_provisioner_scheduling_duration_seconds | +| Interruption | karpenter_interruption_actions_performed | + +{{% alert title="Note" color="warning" %}} +Karpenter now waits for the underlying instance to be completely terminated before deleting a node and orchestrates this by emitting `NodeClaimNotFoundError`. With this change we expect to see an increase in the `NodeClaimNotFoundError`. Customers can filter out this error by label in order to get accurate values for `karpenter_cloudprovider_errors_total` metric. Use this Prometheus filter expression - `({controller!="node.termination"} or {controller!="nodeclaim.termination"}) and {error!="NodeClaimNotFoundError"}`. 
+{{% /alert %}} diff --git a/website/content/en/preview/concepts/disruption.md b/website/content/en/preview/concepts/disruption.md index bb6744c46d46..cccb8d297a8c 100644 --- a/website/content/en/preview/concepts/disruption.md +++ b/website/content/en/preview/concepts/disruption.md @@ -239,14 +239,14 @@ spec: budgets: - nodes: "20%" reasons: - - "empty" - - "drifted" + - "Empty" + - "Drifted" - nodes: "5" - nodes: "0" schedule: "@daily" duration: 10m reasons: - - "underutilized" + - "Underutilized" ``` #### Schedule diff --git a/website/content/en/preview/concepts/nodeclasses.md b/website/content/en/preview/concepts/nodeclasses.md index 13df18921c90..939e052da3f7 100644 --- a/website/content/en/preview/concepts/nodeclasses.md +++ b/website/content/en/preview/concepts/nodeclasses.md @@ -19,7 +19,7 @@ spec: template: spec: nodeClassRef: - apiVersion: karpenter.k8s.aws/v1 + group: karpenter.k8s.aws kind: EC2NodeClass name: default --- @@ -97,15 +97,17 @@ spec: # Each term in the array of amiSelectorTerms is ORed together # Within a single term, all conditions are ANDed amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" - - alias: al2023@v20240625 # Use alias to select a particular EKS optimized AMI + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 + # Select EKS optimized AL2023 AMIs with version `v20240703`. This term is mutually + # exclusive and can't be specified with other terms. + # - alias: al2023@v20240703 # Optional, propagates tags to underlying EC2 resources tags: @@ -209,9 +211,11 @@ Refer to the [NodePool docs]({{}}) for settings applicable t ## spec.kubelet -Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for -additional customization and use cases. Adjust these only if you know you need to do so. For more details on kubelet configuration arguments, [see the KubeletConfiguration API specification docs](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/). -The implemented fields are a subset of the full list of upstream kubelet configuration arguments. Please cut an issue if you'd like to see another field implemented. +Karpenter provides the ability to specify a few additional Kubelet arguments. +These are all optional and provide support for additional customization and use cases. +Adjust these only if you know you need to do so. +For more details on kubelet settings, see the [KubeletConfiguration reference](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/). +The implemented fields are a subset of the full list of upstream kubelet configuration arguments. ```yaml kubelet: @@ -244,6 +248,44 @@ kubelet: clusterDNS: ["10.0.1.100"] ``` +{{% alert title="Note" color="primary" %}} +If you need to specify a field that isn't present in `spec.kubelet`, you can set it via custom [UserData]({{< ref "#specuserdata" >}}). +For example, if you wanted to configure `maxPods` and `registryPullQPS` you would set the former through `spec.kubelet` and the latter through UserData. 
The following example achieves this with AL2023:
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+spec:
+  amiSelectorTerms:
+    - alias: al2023@v20240807
+  kubelet:
+    maxPods: 42
+  userData: |
+    apiVersion: node.eks.aws/v1alpha1
+    kind: NodeConfig
+    spec:
+      kubelet:
+        config:
+          # Configured through UserData since unavailable in `spec.kubelet`
+          registryPullQPS: 10
+```
+
+Note that when using the `Custom` AMIFamily you will need to specify fields in **both** `spec.kubelet` and `spec.userData`.
+{{% /alert %}}
+
+{{% alert title="Warning" color="warning" %}}
+The Bottlerocket AMIFamily does not support the following fields:
+
+* `evictionSoft`
+* `evictionSoftGracePeriod`
+* `evictionMaxPodGracePeriod`
+* `cpuCFSQuota`
+
+If any of these fields are specified on a Bottlerocket EC2NodeClass, they will be omitted from generated UserData and ignored for scheduling purposes.
+Support for these fields can be tracked via GitHub issue [#3722](https://github.com/aws/karpenter-provider-aws/issues/3722).
+{{% /alert %}}
+
 #### Pods Per Core
 
 An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance.
@@ -254,10 +296,6 @@ The value generated from `podsPerCore` cannot exceed `maxPods`, meaning, if both
 `maxPods` may not be set in the `kubelet` of an EC2NodeClass, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the EC2NodeClass will not cause unexpected behavior by exceeding the `maxPods` value.
 {{% /alert %}}
 
-{{% alert title="Pods Per Core on Bottlerocket" color="warning" %}}
-Bottlerocket AMIFamily currently does not support `podsPerCore` configuration. If a EC2NodeClass contains a `provider` or `providerRef` to a node template that will launch a Bottlerocket instance, the `podsPerCore` value will be ignored for scheduling and for configuring the kubelet.
-{{% /alert %}}
-
 #### Max Pods
 
 For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup.
@@ -356,11 +394,10 @@ It's currently not possible to specify custom networking with Windows nodes.
 
 AMIFamily dictates the default bootstrapping logic for nodes provisioned through this `EC2NodeClass`.
 An `amiFamily` is only required if you don't specify a `spec.amiSelectorTerms.alias` object.
-For example, if you specify `alias: al2023@v20240625`, the `amiFamily` is implicitly `AL2023`.
+For example, if you specify `alias: al2023@v20240807`, the `amiFamily` is implicitly `AL2023`.
 AMIFamily does not impact which AMI is discovered, only the UserData generation and default BlockDeviceMappings. To automatically discover EKS optimized AMIs, use the new [`alias` field in amiSelectorTerms]({{< ref "#specamiselectorterms" >}}).
-
 {{% alert title="Ubuntu Support Dropped at v1" color="warning" %}}
 Support for the Ubuntu AMIFamily has been dropped at Karpenter `v1.0.0`. 
@@ -471,10 +508,6 @@ max-pods = 110 ``` -{{% alert title="Note" color="primary" %}} -Karpenter will automatically query for the appropriate [EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) via AWS Systems Manager (SSM). In the case of the `Custom` AMIFamily, no default AMIs are defined. As a result, `amiSelectorTerms` must be specified to inform Karpenter on which custom AMIs are to be used. -{{% /alert %}} - ### Custom The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families. For this AMIFamily, kubelet must add the taint `karpenter.sh/unregistered:NoExecute` via the `--register-with-taints` flag ([flags](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)) or the KubeletConfiguration spec ([options](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/#kubelet-config-k8s-io-v1-CredentialProviderConfig) and [docs](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)). Karpenter will fail to register nodes that do not have this taint. @@ -659,24 +692,28 @@ For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private- AMI Selector Terms are __required__ and are used to configure AMIs for Karpenter to use. AMIs are discovered through alias, id, owner, name, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). -This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different AMIs that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled. +This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. +Effectively, all requirements within a single term are ANDed together. +It's possible that you may want to select on two different AMIs that have unrelated requirements. +In this case, you can specify multiple terms which will be ORed together to form your selection logic. +The example below shows how this selection logic is fulfilled. ```yaml amiSelectorTerms: - # Select on any AMI that has an al2023 AMI family and 20240625 version, - # and both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" - - alias: al2023@v20240625 + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 + # Select EKS optimized AL2023 AMIs with version `v20240807`. This term is mutually + # exclusive and can't be specified with other terms. + # - alias: al2023@v20240807 ``` -An `alias` has the following format: `family@version`. -Use the `alias` field to select an EKS-optimized AMI family and version. Family can be one of the following values: +An `alias` term can be used to select EKS-optimized AMIs. An `alias` is formatted as `family@version`. 
Family can be one of the following values: * `al2` * `al2023` @@ -686,17 +723,15 @@ Use the `alias` field to select an EKS-optimized AMI family and version. Family The version string can be set to `latest`, or pinned to a specific AMI using the format of that AMI's GitHub release tags. For example, AL2 and AL2023 use dates for their release, so they can be pinned as follows: -``` +```yaml alias: al2023@v20240703 ``` Bottlerocket uses a semantic version for their releases. You can pin bottlerocket as follows: -``` +```yaml alias: bottlerocket@v1.20.4 ``` The Windows family does not support pinning, so only `latest` is supported. -An `alias` is mutually exclusive and may not be specified with any other terms. - To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To select AMIs that are not owned by `amazon` or the account that Karpenter is running in, use the `owner` field - you can use a combination of account aliases (e.g. `self` `amazon`, `your-aws-account-name`) and account IDs. If owner is not set for `name`, it defaults to `self,amazon`, preventing Karpenter from inadvertently selecting an AMI that is owned by a different account. Tags don't require an owner as tags can only be discovered by the user who created them. @@ -709,7 +744,7 @@ AMIs may be specified by any AWS tag, including `Name`. Selecting by tag or by n If `amiSelectorTerms` match more than one AMI, Karpenter will automatically determine which AMI best fits the workloads on the launched worker node under the following constraints: * When launching nodes, Karpenter automatically determines which architecture a custom AMI is compatible with and will use images that match an instanceType's requirements. - * Note that Karpenter **cannot** detect any requirement other than architecture. If you need to specify different AMIs for different kind of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`. + * Unless using an alias, Karpenter **cannot** detect requirements other than architecture. If you need to specify different AMIs for different kind of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`. * If multiple AMIs are found that can be used, Karpenter will choose the latest one. * If no AMIs are found that can be used, then no nodes will be provisioned. {{% /alert %}} @@ -719,7 +754,7 @@ If `amiSelectorTerms` match more than one AMI, Karpenter will automatically dete Select by AMI family and version: ```yaml amiSelectorTerms: - - alias: al2023@v20240625 + - alias: al2023@v20240807 ``` Select all with a specified tag: @@ -1053,8 +1088,8 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 --//-- ``` -{{% alert title="Note" color="primary" %}} -You can also set kubelet-config properties by modifying the kubelet-config.json file before the EKS bootstrap script starts the kubelet: +{{% alert title="Tip" color="secondary" %}} +You can set additional kubelet configuration properties, unavailable through `spec.kubelet`, by updating the `kubelet-config.json` file: ```yaml apiVersion: karpenter.k8s.aws/v1 @@ -1062,7 +1097,6 @@ kind: EC2NodeClass metadata: name: kubelet-config-example spec: - ... amiFamily: AL2 userData: | #!/bin/bash @@ -1325,6 +1359,9 @@ spec: ### Custom * No merging is performed, your UserData must perform all setup required of the node to allow it to join the cluster. 
+* Custom UserData must meet the following requirements to work correctly with Karpenter: + * It must ensure the node is registered with the `karpenter.sh/unregistered:NoExecute` taint (via kubelet configuration field `registerWithTaints`) + * It must set kubelet config options to match those configured in `spec.kubelet` ## spec.detailedMonitoring @@ -1398,11 +1435,12 @@ status: #### Examples -Default AMIs resolved from the AL2 AMIFamily: +AMIs resolved with an AL2 alias: ```yaml spec: - amiFamily: AL2 + amiSelectorTerms: + - alias: al2@v20240807 status: amis: - id: ami-03c3a3dcda64f5b75 @@ -1447,11 +1485,10 @@ status: operator: DoesNotExist ``` -AMIs resolved from [`spec.amiSelectorTerms`]({{< ref "#specamiselectorterms" >}}): +AMIs resolved from tags: ```yaml spec: - amiFamily: AL2 amiSelectorTerms: - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" diff --git a/website/content/en/preview/contributing/documentation-updates.md b/website/content/en/preview/contributing/documentation-updates.md index daa6516e30b1..0eb9db1e10b3 100644 --- a/website/content/en/preview/contributing/documentation-updates.md +++ b/website/content/en/preview/contributing/documentation-updates.md @@ -3,7 +3,7 @@ title: "Documentation Updates" linkTitle: "Documentation Updates" weight: 50 description: > - Infomration helpful for contributing simple documentation updates. + Information helpful for contributing simple documentation updates. --- - Documentation for https://karpenter.sh/docs/ is built under website/content/en/preview/. diff --git a/website/content/en/preview/getting-started/_index.md b/website/content/en/preview/getting-started/_index.md index cebdf66a99fd..27ecde8faf92 100644 --- a/website/content/en/preview/getting-started/_index.md +++ b/website/content/en/preview/getting-started/_index.md @@ -8,6 +8,9 @@ description: > To get started with Karpenter, the [Getting Started with Karpenter]({{< relref "getting-started-with-karpenter" >}}) guide provides an end-to-end procedure for creating a cluster (with `eksctl`) and adding Karpenter. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. + If you prefer, the following instructions use Terraform to create a cluster and add Karpenter: * [Amazon EKS Blueprints for Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints): Follow a basic [Getting Started](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/) guide and also add modules and add-ons. This includes a [Karpenter](https://aws-ia.github.io/terraform-aws-eks-blueprints/patterns/karpenter/) add-on that lets you bypass the instructions in this guide for setting up Karpenter. diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md index e2b943108a1b..4bde420fb1ae 100644 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md @@ -11,9 +11,10 @@ Karpenter automatically provisions new nodes in response to unschedulable pods. This guide shows how to get started with Karpenter by creating a Kubernetes cluster and installing Karpenter. 
To use Karpenter, you must be running a supported Kubernetes cluster on a supported cloud provider. -Currently, the following Cloud Providers are supported: -- [AWS](https://github.com/aws/karpenter-provider-aws) -- [Azure](https://github.com/Azure/karpenter-provider-azure) + +The guide below explains how to utilize the [Karpenter provider for AWS](https://github.com/aws/karpenter-provider-aws) with EKS. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. ## Create a cluster and add Karpenter @@ -124,7 +125,7 @@ cosign verify public.ecr.aws/karpenter/karpenter:{{< param "latest_release_versi ``` {{% alert title="DNS Policy Notice" color="warning" %}} -Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpetner can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpenter can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts-up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). 
{{% /alert %}} diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh index f3625e936810..85213a3457c3 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh @@ -23,14 +23,15 @@ spec: operator: Gt values: ["2"] nodeClassRef: - apiVersion: karpenter.k8s.aws/v1 + group: karpenter.k8s.aws kind: EC2NodeClass name: default + expireAfter: 720h # 30 * 24h = 720h limits: cpu: 1000 disruption: consolidationPolicy: WhenEmptyOrUnderutilized - expireAfter: 720h # 30 * 24h = 720h + consolidateAfter: 1m --- apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh index f3625e936810..85213a3457c3 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh @@ -23,14 +23,15 @@ spec: operator: Gt values: ["2"] nodeClassRef: - apiVersion: karpenter.k8s.aws/v1 + group: karpenter.k8s.aws kind: EC2NodeClass name: default + expireAfter: 720h # 30 * 24h = 720h limits: cpu: 1000 disruption: consolidationPolicy: WhenEmptyOrUnderutilized - expireAfter: 720h # 30 * 24h = 720h + consolidateAfter: 1m --- apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass diff --git a/website/content/en/preview/reference/instance-types.md b/website/content/en/preview/reference/instance-types.md index 2a00344fba04..29dd6b22a6e4 100644 --- a/website/content/en/preview/reference/instance-types.md +++ b/website/content/en/preview/reference/instance-types.md @@ -3874,151 +3874,6 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| -## c7g-flex Family -### `c7g-flex.medium` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|1| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|2048| - |karpenter.k8s.aws/instance-size|medium| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.medium| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|940m| - |ephemeral-storage|17Gi| - |memory|1392Mi| - |pods|8| -### `c7g-flex.large` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|2| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|4096| - |karpenter.k8s.aws/instance-size|large| - 
|kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.large| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|1930m| - |ephemeral-storage|17Gi| - |memory|3055Mi| - |pods|29| -### `c7g-flex.xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|4| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|8192| - |karpenter.k8s.aws/instance-size|xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|3920m| - |ephemeral-storage|17Gi| - |memory|6525Mi| - |pods|58| -### `c7g-flex.2xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|8| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|16384| - |karpenter.k8s.aws/instance-size|2xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.2xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|7910m| - |ephemeral-storage|17Gi| - |memory|14103Mi| - |pods|58| -### `c7g-flex.4xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|16| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|32768| - |karpenter.k8s.aws/instance-size|4xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.4xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|15890m| - |ephemeral-storage|17Gi| - |memory|27322Mi| - |pods|234| -### `c7g-flex.8xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|c| - |karpenter.k8s.aws/instance-cpu|32| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|c7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|65536| - |karpenter.k8s.aws/instance-size|8xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|c7g-flex.8xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|31850m| - |ephemeral-storage|17Gi| - |memory|57632Mi| - |pods|234| ## c7gd Family ### `c7gd.medium` #### Labels @@ -12614,151 +12469,6 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| 
|vpc.amazonaws.com/pod-eni|107| -## m7g-flex Family -### `m7g-flex.medium` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|1| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|4096| - |karpenter.k8s.aws/instance-size|medium| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.medium| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|940m| - |ephemeral-storage|17Gi| - |memory|3286Mi| - |pods|8| -### `m7g-flex.large` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|2| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|8192| - |karpenter.k8s.aws/instance-size|large| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.large| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|1930m| - |ephemeral-storage|17Gi| - |memory|6844Mi| - |pods|29| -### `m7g-flex.xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|4| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|16384| - |karpenter.k8s.aws/instance-size|xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|3920m| - |ephemeral-storage|17Gi| - |memory|14103Mi| - |pods|58| -### `m7g-flex.2xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|8| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|32768| - |karpenter.k8s.aws/instance-size|2xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.2xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|7910m| - |ephemeral-storage|17Gi| - |memory|29258Mi| - |pods|58| -### `m7g-flex.4xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|16| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| 
- |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|65536| - |karpenter.k8s.aws/instance-size|4xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.4xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|15890m| - |ephemeral-storage|17Gi| - |memory|57632Mi| - |pods|234| -### `m7g-flex.8xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|32| - |karpenter.k8s.aws/instance-cpu-manufacturer|aws| - |karpenter.k8s.aws/instance-ebs-bandwidth|10000| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7g-flex| - |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|131072| - |karpenter.k8s.aws/instance-size|8xlarge| - |kubernetes.io/arch|arm64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|m7g-flex.8xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|31850m| - |ephemeral-storage|17Gi| - |memory|118253Mi| - |pods|234| ## m7gd Family ### `m7gd.medium` #### Labels diff --git a/website/content/en/preview/reference/settings.md b/website/content/en/preview/reference/settings.md index 3374e416745a..a84521d42b34 100644 --- a/website/content/en/preview/reference/settings.md +++ b/website/content/en/preview/reference/settings.md @@ -43,11 +43,11 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. -| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|---------|---------| -| Drift | false | Alpha | v0.21.x | v0.32.x | -| Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Beta | v0.34.x | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|--------|---------|---------| +| Drift | false | Alpha | v0.21.x | v0.32.x | +| Drift | true | Beta | v0.33.x | | +| SpotToSpotConsolidation | false | Alpha | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/preview/troubleshooting.md b/website/content/en/preview/troubleshooting.md index 2627508f2954..c51d4f341ef5 100644 --- a/website/content/en/preview/troubleshooting.md +++ b/website/content/en/preview/troubleshooting.md @@ -75,12 +75,12 @@ If a long cluster name causes the Karpenter node role name to exceed 64 characte Keep in mind that `KarpenterNodeRole-` is just a recommendation from the getting started guide. Instead of using the eksctl role, you can shorten the name to anything you like, as long as it has the right permissions. -### Unknown field in Provisioner spec +### Unknown field in NodePool or EC2NodeClass spec If you are upgrading from an older version of Karpenter, there may have been changes in the CRD between versions. 
Attempting to utilize newer functionality which is surfaced in newer versions of the CRD may result in the following error message: ``` -error: error validating "STDIN": error validating data: ValidationError(Provisioner.spec): unknown field "" in sh.karpenter.v1alpha5.Provisioner.spec; if you choose to ignore these errors, turn validation off with --validate=false +Error from server (BadRequest): error when creating "STDIN": NodePool in version "v1" cannot be handled as a NodePool: strict decoding error: unknown field "spec.template.spec.nodeClassRef.foo" ``` If you see this error, you can solve the problem by following the [Custom Resource Definition Upgrade Guidance](../upgrade-guide/#custom-resource-definition-crd-upgrades). @@ -91,11 +91,10 @@ Info on whether there has been a change to the CRD between versions of Karpenter `0.16.0` changed the default replicas from 1 to 2. -Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`) +Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/nodepool DoesNotExist requirement`) so it can't provision for the second Karpenter pod. -To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter -(these are instances with the name `karpenter.sh/provisioner-name/`) to run both pods. +To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter to run both pods. To do so on AWS increase the `minimum` and `desired` parameters on the node group autoscaling group to launch at lease 2 instances. @@ -144,52 +143,6 @@ You can fix this by patching the node objects: kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{"\n"}' | grep "karpenter.sh/termination" | cut -d ':' -f 1 | xargs kubectl patch node --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' ``` -## Webhooks - -### Failed calling webhook "validation.webhook.provisioners.karpenter.sh" - -If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` - -Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. - -Delete the stale webhooks. - -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.provisioners.karpenter.sh -kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.karpenter.sh -``` - -### Failed calling webhook "defaulting.webhook.karpenter.sh" - -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. 
- -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh -``` - -If you are not able to create a provisioner due to `Error from server (InternalError): error when creating "provisioner.yaml": Internal error occurred: failed calling webhook "defaulting.webhook.karpenter.sh": Post "https://karpenter-webhook.karpenter.svc:443/default-resource?timeout=10s": context deadline exceeded` - -Verify that the karpenter pod is running (should see 2/2 containers with a "Ready" status) - -```text -kubectl get po -A -l app.kubernetes.io/name=karpenter -NAME READY STATUS RESTARTS AGE -karpenter-7b46fb5c-gcr9z 2/2 Running 0 17h -``` - -Karpenter service has endpoints assigned to it - -```text -kubectl get ep -A -l app.kubernetes.io/name=karpenter -NAMESPACE NAME ENDPOINTS AGE -karpenter karpenter 192.168.39.88:8443,192.168.39.88:8080 16d -``` - -Your security groups are not blocking you from reaching your webhook. - -This is especially relevant if you have used `terraform-eks-module` version `>=18` since that version changed its security -approach, and now it's much more restrictive. - ## Provisioning ### Instances with swap volumes fail to register with control plane @@ -201,7 +154,7 @@ Some instance types (c1.medium and m1.small) are given limited amount of memory ``` ##### Solutions -Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the Provisioner requirements to use larger instance types. +Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the NodePool requirements to use larger instance types. ### DaemonSets can result in deployment failures @@ -209,7 +162,7 @@ For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly conside This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. +If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your NodePool to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -224,55 +177,24 @@ This behavior is not unique to Karpenter and can also occur with the standard `k To prevent this, you can set LimitRanges on pod deployments on a per-namespace basis. See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-practices/karpenter/#use-limitranges-to-configure-defaults-for-resource-requests-and-limits) for further information on the use of LimitRanges. -### Missing subnetSelector and securityGroupSelector tags causes provisioning failures - -Starting with Karpenter `0.5.5`, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. 
-The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: - -```text -kind: AWSNodeTemplate -spec: - subnetSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} - securityGroupSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} -``` - -To check your subnet and security group selectors, type the following: - -```bash -aws ec2 describe-subnets --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns subnets matching the selector* - -```bash -aws ec2 describe-security-groups --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns security groups matching the selector* - -Provisioners created without those tags and run in more recent Karpenter versions will fail with this message when you try to run the provisioner: - -```text - field(s): spec.provider.securityGroupSelector, spec.provider.subnetSelector -``` - ### Pods using Security Groups for Pods stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running" -When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). +When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". +This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. +More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). -To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter Provisioner spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). +To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter NodePool spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). ```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner +apiVersion: karpenter.sh/v1 +kind: NodePool metadata: name: default spec: - labels: - vpc.amazonaws.com/has-trunk-attached: "false" - ttlSecondsAfterEmpty: 30 + template + metadata: + labels: + vpc.amazonaws.com/has-trunk-attached: "false" ``` ### Pods using PVCs can hit volume limits and fail to scale-up @@ -329,7 +251,7 @@ time=2023-06-12T19:18:15Z type=Warning reason=FailedCreatePodSandBox from=kubele By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. 
See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for a more detailed information on these instance types' limits.
 
-If the max-pods (configured through your Provisioner [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state.
+If the max-pods (configured through your EC2NodeClass [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state.
 
 If you've enabled [Security Groups per Pod](https://aws.github.io/aws-eks-best-practices/networking/sgpp/), one of the instance's ENIs is reserved as the trunk interface and uses branch interfaces off of that trunk interface to assign different security groups.
 If you do not have any `SecurityGroupPolicies` configured for your pods, they will be unable to utilize branch interfaces attached to the trunk interface, and IPs will only be available from the non-trunk ENIs.
@@ -341,19 +263,19 @@ Note that Karpenter is not aware if [Security Groups per Pod](https://aws.github
 
 To avoid this discrepancy between `maxPods` and the supported pod density of the EC2 instance based on ENIs and allocatable IPs, you can perform one of the following actions on your cluster:
 
 1. Enable [Prefix Delegation](https://www.eksworkshop.com/docs/networking/prefix/) to increase the number of allocatable IPs for the ENIs on each instance type
-2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your Provisioner
-3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs.
+2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your NodePools
+3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs.
 
-For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}).
+For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}).
 
 #### IP exhaustion in a subnet
 
-When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`AWSNodeTemplate`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod.
+When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`EC2NodeClass`]({{}}). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod.
 
 ##### Solutions
 
 1. 
Use `topologySpreadConstraints` on `topology.kubernetes.io/zone` to spread your pods and nodes more evenly across zones -2. Increase the IP address space (CIDR) for the subnets selected by your `AWSNodeTemplate` +2. Increase the IP address space (CIDR) for the subnets selected by your `EC2NodeClass` 3. Use [custom networking](https://www.eksworkshop.com/docs/networking/custom-networking/) to assign separate IP address spaces to your pods and your nodes 4. [Run your EKS cluster on IPv6](https://aws.github.io/aws-eks-best-practices/networking/ipv6/) (Note: IPv6 clusters have some known limitations which should be well-understood before choosing to use one) @@ -479,7 +401,7 @@ Karpenter determines node initialization using three factors: 1. Node readiness 2. Expected resources are registered -3. Provisioner startup taints are removed +3. NodePool startup taints are removed #### Node Readiness @@ -496,9 +418,9 @@ Common resources that don't register and leave nodes in a non-initialized state: 1. `nvidia.com/gpu` (or any gpu-based resource): A GPU instance type that supports the `nvidia.com/gpu` resource is launched but the daemon/daemonset to register the resource on the node doesn't exist 2. `vpc.amazonaws.com/pod-eni`: An instance type is launched by the `ENABLE_POD_ENI` value is set to `false` in the `vpc-cni` plugin. Karpenter will expect that the `vpc.amazonaws.com/pod-eni` will be registered, but it never will. -#### Provisioner startup taints are removed +#### NodePool startup taints are removed -Karpenter expects all startup taints specified in `.spec.startupTaints` of the provisioner to be completely removed from node `.spec.taints` before it will consider the node initialized. +Karpenter expects all startup taints specified in `.spec.template.spec.startupTaints` of the NodePool to be completely removed from node `.spec.taints` before it will consider the node initialized. ### Node NotReady @@ -513,7 +435,7 @@ The easiest way to start debugging is to connect to the instance and get the Kub ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -526,7 +448,7 @@ For Bottlerocket, you'll need to get access to the root filesystem: ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -613,7 +535,7 @@ This means that your CNI plugin is out of date. You can find instructions on how ### Node terminates before ready on failed encrypted EBS volume If you are using a custom launch template and an encrypted EBS volume, the IAM principal launching the node may not have sufficient permissions to use the KMS customer managed key (CMK) for the EC2 EBS root volume. -This issue also applies to [Block Device Mappings]({{}}) specified in the Provisioner. +This issue also applies to [Block Device Mappings]({{}}) specified in the EC2NodeClass. In either case, this results in the node terminating almost immediately upon creation. Keep in mind that it is possible that EBS Encryption can be enabled without your knowledge. 
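+
+One way to check for this (a sketch only, assuming the AWS CLI is configured for the affected account and region) is to confirm whether EBS encryption by default is enabled and which KMS key it uses, and then verify that the IAM principal launching the node is allowed to use that key:
+
+```bash
+# Is EBS encryption by default enabled in this region?
+aws ec2 get-ebs-encryption-by-default
+# Which KMS key is used for default EBS encryption?
+aws ec2 get-ebs-default-kms-key-id
+```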
diff --git a/website/content/en/preview/upgrading/compatibility.md b/website/content/en/preview/upgrading/compatibility.md index 2ccb7d2c9d1e..01d6b0541c37 100644 --- a/website/content/en/preview/upgrading/compatibility.md +++ b/website/content/en/preview/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | -|------------|----------|----------|----------|----------|----------|----------|--------| -| karpenter | \>= 0.21 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34 | 0.37.0 | +| KUBERNETES | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | +|------------|---------------------|----------|----------|----------|----------|----------|------------| +| karpenter | \>= 0.21 \<= 0.37 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34 | \>= 0.37 | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) diff --git a/website/content/en/preview/upgrading/upgrade-guide.md b/website/content/en/preview/upgrading/upgrade-guide.md index 8fbc20aba9c3..a4dcaf3ad1c1 100644 --- a/website/content/en/preview/upgrading/upgrade-guide.md +++ b/website/content/en/preview/upgrading/upgrade-guide.md @@ -24,7 +24,7 @@ Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are pu ``` {{% alert title="Note" color="warning" %}} -If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} * As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. @@ -53,6 +53,7 @@ Below is the full changelog for v1, copied from the [v1 Migration Upgrade Proced * Expiration is now forceful and begins draining as soon as it’s expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining. * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields). * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes. 
+ * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up. * API Moves: * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field. * `Kubelet` was moved to the EC2NodeClass from the NodePool. diff --git a/website/content/en/preview/upgrading/v1-migration.md b/website/content/en/preview/upgrading/v1-migration.md index b18336cabacf..69930b2ea9c1 100644 --- a/website/content/en/preview/upgrading/v1-migration.md +++ b/website/content/en/preview/upgrading/v1-migration.md @@ -17,6 +17,7 @@ Before you begin upgrading to `v1.0.0`, you should know that: * Version `v1.0.0` adds [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to automatically pull the v1 API version of previously applied v1beta1 NodePools, EC2NodeClasses, and NodeClaims. Karpenter will stop serving the v1beta1 API version at v1.1.0 and will drop the conversion webhooks at that time. You will need to migrate all stored manifests to v1 API versions on Karpenter v1.0+. Keep in mind that this is a conversion and not dual support, which means that resources are updated in-place rather than migrated over from the previous version. * If you need to rollback the upgrade to v1, you need to upgrade to a special patch version of the minor version you came from. For instance, if you came from v0.33.5, you'll need to downgrade back to v0.33.6. More details on how to do this in [Downgrading]({{}}). * Validate that you are running at least Kubernetes 1.25. Use the [compatibility matrix]({{}}) to confirm you are on a supported Kubernetes version. +* Karpenter runs a helm post-install-hook as part of upgrading to and from v1.0.0. If you're running Karpenter on a non x86_64 node, you'll need to update your `values.postInstallHook.image` values in your helm `values.yaml` file to point to a compatible image with kubectl. For instance, [an ARM compatible version](https://hub.docker.com/layers/bitnami/kubectl/1.30/images/sha256-d63c6609dd5c336fd036bd303fd4ce5f272e73ddd1923d32c12d62b7149067ed?context=explore). See the [Changelog]({{}}) for details about actions you should take before upgrading to v1.0 or v1.1. @@ -25,7 +26,7 @@ See the [Changelog]({{}}) for details about actions you shoul Please read through the entire procedure before beginning the upgrade. There are major changes in this upgrade, so please evaluate the list of breaking changes before continuing. {{% alert title="Note" color="warning" %}} -The upgrade guide will first require upgrading to your latest patch version prior to upgrade to v1.0.0. This will be to allow the conversion webhooks to operate and minimize downtime of the Karpenter controller when requesting the Karpenter custom resources. +The upgrade guide will first require upgrading to your latest patch version prior to upgrade to v1.0.0. This will be to allow the conversion webhooks to operate and minimize downtime of the Karpenter controller when requesting the Karpenter custom resources. {{% /alert %}} 1. Set environment variables for your cluster to upgrade to the latest patch version of the current Karpenter version you're running on: @@ -60,7 +61,7 @@ The upgrade guide will first require upgrading to your latest patch version prio 3. 
Review for breaking changes between v0.33 and v0.37: If you are already running Karpenter v0.37.x, you can skip this step. If you are running an earlier Karpenter version, you need to review the [Upgrade Guide]({{}}) for each minor release. -4. Set environment variables for upgrading to the latest patch version: +4. Set environment variables for upgrading to the latest patch version. Note that `v0.33.6` and `v0.34.7` both need to include the v prefix, whereas `v0.35+` should not. ```bash export KARPENTER_VERSION= @@ -80,7 +81,7 @@ The upgrade guide will first require upgrading to your latest patch version prio 7. Upgrade Karpenter to the latest patch version of your current minor version's. At the end of this step, conversion webhooks will run but will not convert any version. ```bash - # Service account annotation can be dropped when using pod identity + # Service account annotation can be dropped when using pod identity helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ --set settings.clusterName=${CLUSTER_NAME} \ @@ -106,7 +107,7 @@ The upgrade guide will first require upgrading to your latest patch version prio ```bash TEMPOUT=$(mktemp) - curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ + curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ @@ -127,7 +128,7 @@ The upgrade guide will first require upgrading to your latest patch version prio 11. Upgrade Karpenter to the new version. At the end of this step, conversion webhooks run to convert the Karpenter CRDs to v1. ```bash - # Service account annotion can be dropped when using pod identity + # Service account annotion can be dropped when using pod identity helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ --set settings.clusterName=${CLUSTER_NAME} \ @@ -251,7 +252,7 @@ Keep in mind that rollback, without replacing the Karpenter nodes, will not be s ### Downgrading -Once the Karpenter CRDs are upgraded to v1, conversion webhooks are needed to help convert APIs that are stored in etcd from v1 to v1beta1. Also changes to the CRDs will need to at least include the latest version of the CRD in this case being v1. The patch versions of the v1beta1 Karpenter controller that include the conversion wehooks include: +Once the Karpenter CRDs are upgraded to v1, conversion webhooks are needed to help convert APIs that are stored in etcd from v1 to v1beta1. Also changes to the CRDs will need to at least include the latest version of the CRD in this case being v1. 
The patch versions of the v1beta1 Karpenter controller that include the conversion wehooks include: * v0.37.1 * v0.36.3 @@ -271,6 +272,7 @@ Since both v1beta1 and v1 will be served, `kubectl` will default to returning th ```bash export KARPENTER_NAMESPACE="kube-system" +# Note: v0.33.6 and v0.34.7 include the v prefix, omit it for versions v0.35+ export KARPENTER_VERSION="" export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" export CLUSTER_NAME="" @@ -289,8 +291,19 @@ echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${CLUSTER_NAME}" "${TEMPOU 2. Rollback the Karpenter Policy +**v0.33.6 and v0.34.7:** ```bash -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/docs/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ + && aws cloudformation deploy \ + --stack-name "Karpenter-${CLUSTER_NAME}" \ + --template-file "${TEMPOUT}" \ + --capabilities CAPABILITY_NAMED_IAM \ + --parameter-overrides "ClusterName=${CLUSTER_NAME}" +``` + +**v0.35+:** +```bash +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ @@ -308,10 +321,10 @@ helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-cr --set webhook.port=8443 ``` -4. Rollback the Karpenter Controller +4. Rollback the Karpenter Controller ```bash -# Service account annotation can be dropped when using pod identity +# Service account annotation can be dropped when using pod identity helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ --set settings.clusterName=${CLUSTER_NAME} \ @@ -338,6 +351,8 @@ Karpenter should now be pulling and operating against the v1beta1 APIVersion as * Expiration is now forceful and begins draining as soon as it’s expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining. * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields). * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes. + * Discovered standard AL2023 AMIs will no longer be considered compatible with GPU / accelerator workloads. If you're using an AL2023 EC2NodeClass (without AMISelectorTerms) for these workloads, you will need to select your AMI via AMISelectorTerms (non-alias). 
+ * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up. * API Moves: * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field. * `Kubelet` was moved to the EC2NodeClass from the NodePool. diff --git a/website/content/en/v0.35/reference/metrics.md b/website/content/en/v0.35/reference/metrics.md deleted file mode 100644 index b7cf1366f50f..000000000000 --- a/website/content/en/v0.35/reference/metrics.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: "Metrics" -linkTitle: "Metrics" -weight: 7 - -description: > - Inspect Karpenter Metrics ---- - -Karpenter makes several metrics available in Prometheus format to allow monitoring cluster provisioning status. These metrics are available by default at `karpenter.karpenter.svc.cluster.local:8000/metrics` configurable via the `METRICS_PORT` environment variable documented [here](../settings) -### `karpenter_build_info` -A metric with a constant '1' value labeled by version from which karpenter was built. - -## Nodepool Metrics - -### `karpenter_nodepool_usage` -The nodepool usage is the amount of resources that have been provisioned by a particular nodepool. Labeled by nodepool name and resource type. - -### `karpenter_nodepool_limit` -The nodepool limits are the limits specified on the nodepool that restrict the quantity of resources provisioned. Labeled by nodepool name and resource type. - -## Nodes Metrics - -### `karpenter_nodes_total_pod_requests` -Node total pod requests are the resources requested by non-DaemonSet pods bound to nodes. - -### `karpenter_nodes_total_pod_limits` -Node total pod limits are the resources specified by non-DaemonSet pod limits. - -### `karpenter_nodes_total_daemon_requests` -Node total daemon requests are the resource requested by DaemonSet pods bound to nodes. - -### `karpenter_nodes_total_daemon_limits` -Node total daemon limits are the resources specified by DaemonSet pod limits. - -### `karpenter_nodes_termination_time_seconds` -The time taken between a node's deletion request and the removal of its finalizer - -### `karpenter_nodes_terminated` -Number of nodes terminated in total by Karpenter. Labeled by owning nodepool. - -### `karpenter_nodes_system_overhead` -Node system daemon overhead are the resources reserved for system overhead, the difference between the node's capacity and allocatable values are reported by the status. - -### `karpenter_nodes_leases_deleted` -Number of deleted leaked leases. - -### `karpenter_nodes_created` -Number of nodes created in total by Karpenter. Labeled by owning nodepool. - -### `karpenter_nodes_allocatable` -Node allocatable are the resources allocatable by nodes. - -## Pods Metrics - -### `karpenter_pods_state` -Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, nodepool name, zone, architecture, capacity type, instance type and pod phase. - -### `karpenter_pods_startup_time_seconds` -The time from pod creation until the pod is running. - -## Provisioner Metrics - -### `karpenter_provisioner_scheduling_simulation_duration_seconds` -Duration of scheduling simulations used for deprovisioning and provisioning in seconds. - -### `karpenter_provisioner_scheduling_duration_seconds` -Duration of scheduling process in seconds. 
- -## Nodeclaims Metrics - -### `karpenter_nodeclaims_terminated` -Number of nodeclaims terminated in total by Karpenter. Labeled by reason the nodeclaim was terminated and the owning nodepool. - -### `karpenter_nodeclaims_registered` -Number of nodeclaims registered in total by Karpenter. Labeled by the owning nodepool. - -### `karpenter_nodeclaims_launched` -Number of nodeclaims launched in total by Karpenter. Labeled by the owning nodepool. - -### `karpenter_nodeclaims_initialized` -Number of nodeclaims initialized in total by Karpenter. Labeled by the owning nodepool. - -### `karpenter_nodeclaims_drifted` -Number of nodeclaims drifted reasons in total by Karpenter. Labeled by drift type of the nodeclaim and the owning nodepool. - -### `karpenter_nodeclaims_disrupted` -Number of nodeclaims disrupted in total by Karpenter. Labeled by disruption type of the nodeclaim and the owning nodepool. - -### `karpenter_nodeclaims_created` -Number of nodeclaims created in total by Karpenter. Labeled by reason the nodeclaim was created and the owning nodepool. - -## Interruption Metrics - -### `karpenter_interruption_received_messages` -Count of messages received from the SQS queue. Broken down by message type and whether the message was actionable. - -### `karpenter_interruption_message_latency_time_seconds` -Length of time between message creation in queue and an action taken on the message by the controller. - -### `karpenter_interruption_deleted_messages` -Count of messages deleted from the SQS queue. - -### `karpenter_interruption_actions_performed` -Number of notification actions performed. Labeled by action - -## Disruption Metrics - -### `karpenter_disruption_replacement_nodeclaim_initialized_seconds` -Amount of time required for a replacement nodeclaim to become initialized. - -### `karpenter_disruption_replacement_nodeclaim_failures_total` -The number of times that Karpenter failed to launch a replacement node for disruption. Labeled by disruption method. - -### `karpenter_disruption_queue_depth` -The number of commands currently being waited on in the disruption orchestration queue. - -### `karpenter_disruption_pods_disrupted_total` -Total number of reschedulable pods disrupted on nodes. Labeled by NodePool, disruption action, method, and consolidation type. - -### `karpenter_disruption_nodes_disrupted_total` -Total number of nodes disrupted. Labeled by NodePool, disruption action, method, and consolidation type. - -### `karpenter_disruption_evaluation_duration_seconds` -Duration of the disruption evaluation process in seconds. Labeled by method and consolidation type. - -### `karpenter_disruption_eligible_nodes` -Number of nodes eligible for disruption by Karpenter. Labeled by disruption method and consolidation type. - -### `karpenter_disruption_consolidation_timeouts_total` -Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. - -### `karpenter_disruption_budgets_allowed_disruptions` -The number of nodes for a given NodePool that can be disrupted at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. - -### `karpenter_disruption_actions_performed_total` -Number of disruption actions performed. Labeled by disruption action, method, and consolidation type. - -## Consistency Metrics - -### `karpenter_consistency_errors` -Number of consistency checks that have failed. 
- -## Cluster State Metrics - -### `karpenter_cluster_state_synced` -Returns 1 if cluster state is synced and 0 otherwise. Synced checks that nodeclaims and nodes that are stored in the APIServer have the same representation as Karpenter's cluster state - -### `karpenter_cluster_state_node_count` -Current count of nodes in cluster state - -## Cloudprovider Metrics - -### `karpenter_cloudprovider_instance_type_price_estimate` -Estimated hourly price used when making informed decisions on node cost calculation. This is updated once on startup and then every 12 hours. - -### `karpenter_cloudprovider_instance_type_memory_bytes` -Memory, in bytes, for a given instance type. - -### `karpenter_cloudprovider_instance_type_cpu_cores` -VCPUs cores for a given instance type. - -### `karpenter_cloudprovider_errors_total` -Total number of errors returned from CloudProvider calls. - -### `karpenter_cloudprovider_duration_seconds` -Duration of cloud provider method calls. Labeled by the controller, method name and provider. - -## Cloudprovider Batcher Metrics - -### `karpenter_cloudprovider_batcher_batch_time_seconds` -Duration of the batching window per batcher - -### `karpenter_cloudprovider_batcher_batch_size` -Size of the request batch per batcher - -## Controller Runtime Metrics - -### `controller_runtime_reconcile_total` -Total number of reconciliations per controller - -### `controller_runtime_reconcile_time_seconds` -Length of time per reconciliation per controller - -### `controller_runtime_reconcile_errors_total` -Total number of reconciliation errors per controller - -### `controller_runtime_max_concurrent_reconciles` -Maximum number of concurrent reconciles per controller - -### `controller_runtime_active_workers` -Number of currently used workers per controller - diff --git a/website/content/en/v0.36/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/v0.36/getting-started/getting-started-with-karpenter/_index.md index 5fbe1e7c0438..b4f5f5fcf72e 100644 --- a/website/content/en/v0.36/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/v0.36/getting-started/getting-started-with-karpenter/_index.md @@ -122,7 +122,7 @@ cosign verify public.ecr.aws/karpenter/karpenter:0.36.2 \ ``` {{% alert title="DNS Policy Notice" color="warning" %}} -Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpetner can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpenter can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts-up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. 
This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). {{% /alert %}} diff --git a/website/content/en/v0.36/reference/settings.md b/website/content/en/v0.36/reference/settings.md index 753e9ef7b707..ec6279c014fb 100644 --- a/website/content/en/v0.36/reference/settings.md +++ b/website/content/en/v0.36/reference/settings.md @@ -43,11 +43,11 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. -| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|---------|---------| -| Drift | false | Alpha | v0.21.x | v0.32.x | -| Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Beta | v0.34.x | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|--------|---------|---------| +| Drift | false | Alpha | v0.21.x | v0.32.x | +| Drift | true | Beta | v0.33.x | | +| SpotToSpotConsolidation | false | Alpha | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/v0.36/upgrading/upgrade-guide.md b/website/content/en/v0.36/upgrading/upgrade-guide.md index 9720ffd86b5e..c3e52cab6fac 100644 --- a/website/content/en/v0.36/upgrading/upgrade-guide.md +++ b/website/content/en/v0.36/upgrading/upgrade-guide.md @@ -20,7 +20,7 @@ Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are pu ``` {{% alert title="Note" color="warning" %}} -If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} * As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. 
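As an illustration of the note above, upgrading the CRDs through the dedicated `karpenter-crd` chart sidesteps Helm's behavior of skipping CRDs bundled in the main chart. A minimal sketch, assuming `KARPENTER_VERSION` and `KARPENTER_NAMESPACE` are already exported as in the surrounding guide:

```bash
# Sketch: apply CRD changes explicitly via the karpenter-crd chart,
# since Helm will not upgrade CRDs shipped inside the main karpenter chart.
helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd \
  --version "${KARPENTER_VERSION}" \
  --namespace "${KARPENTER_NAMESPACE}" --create-namespace
```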
diff --git a/website/content/en/v0.37/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/v0.37/getting-started/getting-started-with-karpenter/_index.md index 26b6b5de8ad8..1264f6c46cde 100644 --- a/website/content/en/v0.37/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/v0.37/getting-started/getting-started-with-karpenter/_index.md @@ -122,7 +122,7 @@ cosign verify public.ecr.aws/karpenter/karpenter:0.37.0 \ ``` {{% alert title="DNS Policy Notice" color="warning" %}} -Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpetner can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpenter can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts-up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). {{% /alert %}} diff --git a/website/content/en/v0.37/reference/settings.md b/website/content/en/v0.37/reference/settings.md index 4c339ced6af1..8ec32d885e0a 100644 --- a/website/content/en/v0.37/reference/settings.md +++ b/website/content/en/v0.37/reference/settings.md @@ -43,11 +43,11 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. 
-| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|---------|---------| -| Drift | false | Alpha | v0.21.x | v0.32.x | -| Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Beta | v0.34.x | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|--------|---------|---------| +| Drift | false | Alpha | v0.21.x | v0.32.x | +| Drift | true | Beta | v0.33.x | | +| SpotToSpotConsolidation | false | Alpha | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/v0.37/upgrading/upgrade-guide.md b/website/content/en/v0.37/upgrading/upgrade-guide.md index 0c4624b652d4..55604ae1e46f 100644 --- a/website/content/en/v0.37/upgrading/upgrade-guide.md +++ b/website/content/en/v0.37/upgrading/upgrade-guide.md @@ -20,7 +20,7 @@ Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are pu ``` {{% alert title="Note" color="warning" %}} -If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} * As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. diff --git a/website/content/en/v0.35/_index.md b/website/content/en/v1.0/_index.md similarity index 100% rename from website/content/en/v0.35/_index.md rename to website/content/en/v1.0/_index.md diff --git a/website/content/en/v0.35/concepts/_index.md b/website/content/en/v1.0/concepts/_index.md similarity index 100% rename from website/content/en/v0.35/concepts/_index.md rename to website/content/en/v1.0/concepts/_index.md diff --git a/website/content/en/v0.35/concepts/disruption.md b/website/content/en/v1.0/concepts/disruption.md similarity index 82% rename from website/content/en/v0.35/concepts/disruption.md rename to website/content/en/v1.0/concepts/disruption.md index ccefdd91db0a..cccb8d297a8c 100644 --- a/website/content/en/v0.35/concepts/disruption.md +++ b/website/content/en/v1.0/concepts/disruption.md @@ -1,7 +1,7 @@ --- title: "Disruption" linkTitle: "Disruption" -weight: 4 +weight: 50 description: > Understand different ways Karpenter disrupts nodes --- @@ -13,7 +13,7 @@ The finalizer blocks deletion of the node object while the Termination Controlle ### Disruption Controller -Karpenter automatically discovers disruptable nodes and spins up replacements when needed. Karpenter disrupts nodes by executing one [automated method](#automated-methods) at a time, in order of Expiration, Drift, and then Consolidation. Each method varies slightly, but they all follow the standard disruption process. Karpenter uses [disruption budgets]({{}}) to control the speed of disruption. 
+Karpenter automatically discovers disruptable nodes and spins up replacements when needed. Karpenter disrupts nodes by executing one [automated method](#automated-methods) at a time, first doing Drift and then Consolidation. Each method varies slightly, but they all follow the standard disruption process. Karpenter uses [disruption budgets]({{}}) to control the speed at which these disruptions begin. 1. Identify a list of prioritized candidates for the disruption method. * If there are [pods that cannot be evicted](#pod-eviction) on the node, Karpenter will ignore the node and try disrupting it later. * If there are no disruptable nodes, continue to the next disruption method. @@ -61,11 +61,10 @@ By adding the finalizer, Karpenter improves the default Kubernetes process of no When you run `kubectl delete node` on a node without a finalizer, the node is deleted without triggering the finalization logic. The instance will continue running in EC2, even though there is no longer a node object for it. The kubelet isn’t watching for its own existence, so if a node is deleted, the kubelet doesn’t terminate itself. All the pod objects get deleted by a garbage collection process later, because the pods’ node is gone. {{% /alert %}} -## Automated Methods +## Automated Graceful Methods -Automated methods can be rate limited through [NodePool Disruption Budgets]({{}}) +Automated graceful methods can be rate limited through [NodePool Disruption Budgets]({{}}) -* **Expiration**: Karpenter will mark nodes as expired and disrupt them after they have lived a set number of seconds, based on the NodePool's `spec.disruption.expireAfter` value. You can use node expiry to periodically recycle nodes due to security concerns. * [**Consolidation**]({{}}): Karpenter works to actively reduce cluster cost by identifying when: * Nodes can be removed because the node is empty * Nodes can be removed as their workloads will run on other nodes in the cluster. @@ -74,18 +73,22 @@ Automated methods can be rate limited through [NodePool Disruption Budgets]({{}}): Karpenter will watch for upcoming interruption events that could affect your nodes (health events, spot interruption, etc.) and will taint, drain, and terminate the node(s) ahead of the event to reduce workload disruption. {{% alert title="Defaults" color="secondary" %}} -Disruption is configured through the NodePool's disruption block by the `consolidationPolicy`, `expireAfter` and `consolidateAfter` fields. Karpenter will configure these fields with the following values by default if they are not set: +Disruption is configured through the NodePool's disruption block by the `consolidationPolicy` and `consolidateAfter` fields. `expireAfter` can also be used to control disruption. Karpenter will configure these fields with the following values by default if they are not set: ```yaml spec: disruption: - consolidationPolicy: WhenUnderutilized - expireAfter: 720h + consolidationPolicy: WhenEmptyOrUnderutilized + template: + spec: + expireAfter: 720h ``` {{% /alert %}} ### Consolidation +Consolidation is configured by `consolidationPolicy` and `consolidateAfter`. `consolidationPolicy` determines the pre-conditions for nodes to be considered consolidatable, and can be `WhenEmpty` or `WhenEmptyOrUnderutilized`. If a node has no running non-daemon pods, it is considered empty. `consolidateAfter` can be set to indicate how long Karpenter should wait after a pod schedules or is removed from the node before considering the node consolidatable.
With `WhenEmptyOrUnderutilized`, Karpenter will consider a node consolidatable when its `consolidateAfter` has been reached, empty or not. + Karpenter has two mechanisms for cluster consolidation: 1. **Deletion** - A node is eligible for deletion if all of its pods can run on free capacity of other nodes in the cluster. 2. **Replace** - A node can be replaced if all of its pods can run on a combination of free capacity of other nodes in the cluster and a single lower price replacement node. @@ -165,6 +168,13 @@ Karpenter will add the `Drifted` status condition on NodeClaims if the NodeClaim 1. The `Drift` feature gate is not enabled but the NodeClaim is drifted, Karpenter will remove the status condition. 2. The NodeClaim isn't drifted, but has the status condition, Karpenter will remove it. +## Automated Forceful Methods + +Automated forceful methods will begin draining nodes as soon as the condition is met. Note that these methods bypass NodePool Disruption Budgets, and do not wait for a pre-spin replacement node to be healthy for the pods to reschedule, unlike the graceful methods mentioned above. Use Pod Disruption Budgets and `do-not-disrupt` on your nodes to rate-limit the speed at which your applications are disrupted. + +### Expiration +Karpenter will disrupt nodes as soon as they're expired after they've lived for the duration of the NodePool's `spec.template.spec.expireAfter`. You can use expiration to periodically recycle nodes due to security concerns. + ### Interruption If interruption-handling is enabled, Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These interruption events include: @@ -190,9 +200,20 @@ To enable interruption handling, configure the `--interruption-queue` CLI argume ## Controls -### Disruption Budgets +### TerminationGracePeriod + +This is the duration of time that a node can be draining before it's forcibly deleted. A node begins draining when it's deleted. Pods will be deleted preemptively based on their terminationGracePeriodSeconds before this terminationGracePeriod ends to give them as much time as possible to clean up. Note that if your pod's terminationGracePeriodSeconds is larger than this terminationGracePeriod, Karpenter may forcibly delete the pod before it has its full terminationGracePeriodSeconds to clean up. -You can rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. If undefined, Karpenter will default to one budget with `nodes: 10%`. Budgets will consider nodes that are actively being deleted for any reason, and will only block Karpenter from disrupting nodes voluntarily through expiration, drift, emptiness, and consolidation. +This is especially useful in combination with `nodepool.spec.template.spec.expireAfter` to define an absolute maximum on the lifetime of a node, where a node is deleted at `expireAfter` and finishes draining within the `terminationGracePeriod` thereafter. Pods blocking eviction, such as those protected by PDBs or the `do-not-disrupt` annotation, will block full draining until the `terminationGracePeriod` is reached. + +For instance, a NodeClaim with `terminationGracePeriod` set to `1h` and an `expireAfter` set to `23h` will begin draining after it has lived for `23h`. Let's say a `do-not-disrupt` pod has `terminationGracePeriodSeconds` set to `300` seconds. If the node hasn't been fully drained after `55m`, Karpenter will delete the pod to allow its full `terminationGracePeriodSeconds` for cleanup.
If no pods are blocking draining, Karpenter will cleanup the node as soon as the node is fully drained, rather than waiting for the NodeClaim's `terminationGracePeriod` to finish. + +### NodePool Disruption Budgets + +You can rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. If undefined, Karpenter will default to one budget with `nodes: 10%`. Budgets will consider nodes that are actively being deleted for any reason, and will only block Karpenter from disrupting nodes voluntarily through drift, emptiness, and consolidation. Note that NodePool Disruption Budgets do not prevent Karpenter from cleaning up expired or drifted nodes. + +#### Reasons +Karpenter allows specifying if a budget applies to any of `Drifted`, `Underutilized`, or `Empty`. When a budget has no reasons, it's assumed that it applies to all reasons. When calculating allowed disruptions for a given reason, Karpenter will take the minimum of the budgets that have listed the reason or have left reasons undefined. #### Nodes When calculating if a budget will block nodes from disruption, Karpenter lists the total number of nodes owned by a NodePool, subtracting out the nodes owned by that NodePool that are currently being deleted and nodes that are NotReady. If the number of nodes being deleted by Karpenter or any other processes is greater than the number of allowed disruptions, disruption for this node will not proceed. @@ -200,25 +221,32 @@ When calculating if a budget will block nodes from disruption, Karpenter lists t If the budget is configured with a percentage value, such as `20%`, Karpenter will calculate the number of allowed disruptions as `allowed_disruptions = roundup(total * percentage) - total_deleting - total_notready`. If otherwise defined as a non-percentage value, Karpenter will simply subtract the number of nodes from the total `(total - non_percentage_value) - total_deleting - total_notready`. For multiple budgets in a NodePool, Karpenter will take the minimum value (most restrictive) of each of the budgets. For example, the following NodePool with three budgets defines the following requirements: -- The first budget will only allow 20% of nodes owned by that NodePool to be disrupted. For instance, if there were 19 nodes owned by the NodePool, 4 disruptions would be allowed, rounding up from `19 * .2 = 3.8`. +- The first budget will only allow 20% of nodes owned by that NodePool to be disrupted if it's empty or drifted. For instance, if there were 19 nodes owned by the NodePool, 4 empty or drifted nodes could be disrupted, rounding up from `19 * .2 = 3.8`. - The second budget acts as a ceiling to the previous budget, only allowing 5 disruptions when there are more than 25 nodes. -- The last budget only blocks disruptions during the first 10 minutes of the day, where 0 disruptions are allowed. +- The last budget only blocks disruptions during the first 10 minutes of the day, where 0 disruptions are allowed, only applying to underutilized nodes. 
```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default spec: + template: + spec: + expireAfter: 720h # 30 * 24h = 720h disruption: - consolidationPolicy: WhenUnderutilized - expireAfter: 720h # 30 * 24h = 720h + consolidationPolicy: WhenEmptyOrUnderutilized budgets: - nodes: "20%" + reasons: + - "Empty" + - "Drifted" - nodes: "5" - nodes: "0" schedule: "@daily" duration: 10m + reasons: + - "Underutilized" ``` #### Schedule @@ -290,7 +318,7 @@ metadata: To disable disruption for all nodes launched by a NodePool, you can configure its `.spec.disruption.budgets`. Setting a budget of zero nodes will prevent any of those nodes from being considered for voluntary disruption. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default diff --git a/website/content/en/v1.0/concepts/nodeclaims.md b/website/content/en/v1.0/concepts/nodeclaims.md new file mode 100644 index 000000000000..325d98bd5124 --- /dev/null +++ b/website/content/en/v1.0/concepts/nodeclaims.md @@ -0,0 +1,357 @@ +--- +title: "NodeClaims" +linkTitle: "NodeClaims" +weight: 30 +description: > + Understand NodeClaims +--- + +Karpenter uses NodeClaims to manage the lifecycle of Kubernetes Nodes with the underlying cloud provider. +Karpenter will create and delete NodeClaims in response to the demands of Pods in the cluster. +It does this by evaluating the requirements of pending pods, finding a compatible [NodePool]({{< ref "./nodepools" >}}) and [NodeClass]({{< ref "./nodeclasses" >}}) pair, and creating a NodeClaim which meets both sets of requirements. +Although NodeClaims are immutable resources managed by Karpenter, you can monitor NodeClaims to keep track of the status of your Nodes. + +In addition to tracking the lifecycle of Nodes, NodeClaims serve as requests for capacity. +Karpenter creates NodeClaims in response to provisioning and disruption needs (pre-spin). Whenever Karpenter +creates a NodeClaim, it asks the cloud provider to create the instance (launch), register and link the created node +with the NodeClaim (registration), and wait for the node and its resources to be ready (initialization). + +This page describes how NodeClaims integrate throughout Karpenter and the cloud provider implementation. + +If you want to learn more about the nodes being managed by Karpenter, you can either look directly at the NodeClaim or at the nodes they are associated with: + +* Checking NodeClaims: +If something goes wrong in the process of creating a node, you can look at the NodeClaim +to see where the node creation process might have failed. `kubectl get nodeclaims` will show you the NodeClaims +for the cluster, and its linked node. Using `kubectl describe nodeclaim ` will show the status of a particular NodeClaim. +For example, if the node is NotReady, you might see statuses indicating that the NodeClaim failed to launch, register, or initialize. +There will be logs emitted by the Karpenter controller to indicate this too. + +* Checking nodes: +Use commands such as `kubectl get node` and `kubectl describe node ` to see the actual resources, +labels, and other attributes associated with a particular node. + +## NodeClaim roles in node creation + +NodeClaims provide a critical role in the Karpenter workflow for provisioning capacity, and in node disruptions. + +The following diagram illustrates how NodeClaims interact with other components during Karpenter-driven node creation. 
+ +![nodeclaim-node-creation](/nodeclaims.png) + +{{% alert title="Note" color="primary" %}} +Configure the `KARPENTER_NAMESPACE` environment variable to the namespace where you've installed Karpenter (`kube-system` is the default). Follow along with the Karpenter logs in your cluster and do the following: + +```bash +export KARPENTER_NAMESPACE="kube-system" +kubectl logs -f -n "${KARPENTER_NAMESPACE}" \ + -l app.kubernetes.io/name=karpenter +``` +In a separate terminal, start some pods that would require Karpenter to create nodes to handle those pods. +For example, start up some inflate pods as described in [Scale up deployment]({{< ref "../getting-started/getting-started-with-karpenter/#6-scale-up-deployment" >}}). +{{% /alert %}} + +As illustrated in the previous diagram, Karpenter interacts with NodeClaims and related components when creating a node: + +1. Watches for pods and monitors NodePools and NodeClasses: + * Checks the pod scheduling constraints and resource requests. + * Cross-references the requirements with the existing NodePools and NodeClasses, (e.g. zones, arch, os) + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.114Z", + "message": "found provisionable pod(s)", + "commit": "490ef94", + "Pods": "default/inflate-66fb68585c-xvs86, default/inflate-66fb68585c-hpcdz, default/inflate-66fb68585c-8xztf,01234567adb205c7e default/inflate-66fb68585c-t29d8, default/inflate-66fb68585c-nxflz", + "duration": "100.761702ms" + } + ``` + +2. Computes the shape and size of a NodeClaim (or NodeClaims) to create in the cluster to fit the set of pods from step 1. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.114Z", + "message": "computed new nodeclaim(s) to fit pod(s)", + "controller": "provisioner", + "nodeclaims": 1, + "pods": 5 + } + ``` + +3. Creates the NodeClaim object in the cluster. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:16.128Z", + "message": "created nodeclaim", + "controller": "provisioner", + "NodePool": { + "name":"default" + }, + "NodeClaim": { + "name":"default-sfpsl" + }, + "requests": { + "cpu":"5150m", + "pods":"8" + }, + "instance-types": "c3.2xlarge, c4.2xlarge, c4.4xlarge, c5.2xlarge, c5.4xlarge and 55 other(s)" + } + ``` + +4. Finds the new NodeClaim and translates it into an API call to create a cloud provider instance, logging + the response of the API call. + + If the API response is an unrecoverable error, such as an Insufficient Capacity Error, Karpenter will delete the NodeClaim, mark that instance type as temporarily unavailable, and create another NodeClaim if necessary. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:19.028Z", + "message": "launched nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "instance-type": "c3.2xlarge", + "zone": "us-west-2b", + "capacity-type": "spot", + "allocatable": { + "cpu": "7910m", + "ephemeral-storage": "17Gi", + "memory": "13215Mi", + "pods": "58" + } + } + ``` + +5. Karpenter watches for the instance to register itself with the cluster as a node, and updates the node's + labels, annotations, taints, owner refs, and finalizer to match what was defined in the NodePool and NodeClaim. Once this step is + completed, Karpenter will remove the `karpenter.sh/unregistered` taint from the Node. 
+ + If this fails to succeed within 15 minutes, Karpenter will remove the NodeClaim from the cluster and delete + the underlying instance, creating another NodeClaim if necessary. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:26:19.028Z", + "message": "registered nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "Node": { + "name": "ip-xxx-xxx-xx-xxx.us-west-2.compute.internal" + } + } + ``` + +6. Karpenter continues to watch the node, waiting until the node becomes ready, has all its startup taints removed, + and has all requested resources registered on the node. + + **Example log:** + ```json + { + "level": "INFO", + "time": "2024-06-22T02:24:52.642Z", + "message": "initialized nodeclaim", + "controller": "nodeclaim.lifecycle", + "NodeClaim": { + "name": "default-sfpsl" + }, + "provider-id": "aws:///us-west-2b/i-01234567adb205c7e", + "Node": { + "name": "ip-xxx-xxx-xx-xxx.us-west-2.compute.internal" + }, + "allocatable": { + "cpu": "7910m", + "ephemeral-storage": "18242267924", + "hugepages-2Mi": "0", + "memory": "14320468Ki", + "pods": "58" + } + } + ``` + +## NodeClaim example +The following is an example of a NodeClaim. Keep in mind that you cannot modify a NodeClaim. +To see the contents of a NodeClaim, get the name of your NodeClaim, then run `kubectl describe` to see its contents: + +``` +kubectl get nodeclaim +NAME TYPE ZONE NODE READY AGE +default-m6pzn c7i-flex.2xlarge us-west-1a ip-xxx-xxx-xx-xxx.us-west-1.compute.internal True 7m50s + +kubectl describe nodeclaim default-m6pzn +``` +Starting at the bottom of this example, here are some highlights of what the NodeClaim contains: + +* The Node Name (ip-xxx-xxx-xx-xxx.us-west-1.compute.internal) and Provider ID (aws:///us-west-1a/i-xxxxxxxxxxxxxxxxx) identify the instance that is fulfilling this NodeClaim. +* Image ID (ami-0ccbbed159cce4e37) represents the operating system image running on the node. +* Status shows the resources that are available on the node (CPU, memory, and so on) as well as the conditions associated with the node. The conditions show the status of the node, including whether the node is launched, registered, and initialized. This is particularly useful if Pods are not deploying to the node and you want to determine the cause. +* Spec contains the metadata required for Karpenter to launch and manage an instance. This includes any scheduling requirements, resource requirements, the NodeClass reference, taints, and immutable disruption fields (expireAfter and terminationGracePeriod). +* Additional information includes annotations and labels which should be synced to the Node, creation metadata, the termination finalizer, and the owner reference. 
+ +``` +Name: default-x9wxq +Namespace: +Labels: karpenter.k8s.aws/instance-category=c + karpenter.k8s.aws/instance-cpu=8 + karpenter.k8s.aws/instance-cpu-manufacturer=amd + karpenter.k8s.aws/instance-ebs-bandwidth=3170 + karpenter.k8s.aws/instance-encryption-in-transit-supported=true + karpenter.k8s.aws/instance-family=c5a + karpenter.k8s.aws/instance-generation=5 + karpenter.k8s.aws/instance-hypervisor=nitro + karpenter.k8s.aws/instance-memory=16384 + karpenter.k8s.aws/instance-network-bandwidth=2500 + karpenter.k8s.aws/instance-size=2xlarge + karpenter.sh/capacity-type=spot + karpenter.sh/nodepool=default + kubernetes.io/arch=amd64 + kubernetes.io/os=linux + node.kubernetes.io/instance-type=c5a.2xlarge + topology.k8s.aws/zone-id=usw2-az3 + topology.kubernetes.io/region=us-west-2 + topology.kubernetes.io/zone=us-west-2c +Annotations: compatibility.karpenter.k8s.aws/cluster-name-tagged: true + compatibility.karpenter.k8s.aws/kubelet-drift-hash: 15379597991425564585 + karpenter.k8s.aws/ec2nodeclass-hash: 5763643673275251833 + karpenter.k8s.aws/ec2nodeclass-hash-version: v3 + karpenter.k8s.aws/tagged: true + karpenter.sh/nodepool-hash: 377058807571762610 + karpenter.sh/nodepool-hash-version: v3 +API Version: karpenter.sh/v1 +Kind: NodeClaim +Metadata: + Creation Timestamp: 2024-08-07T05:37:30Z + Finalizers: + karpenter.sh/termination + Generate Name: default- + Generation: 1 + Owner References: + API Version: karpenter.sh/v1 + Block Owner Deletion: true + Kind: NodePool + Name: default + UID: 6b9c6781-ac05-4a4c-ad6a-7551a07b2ce7 + Resource Version: 19600526 + UID: 98a2ba32-232d-45c4-b7c0-b183cfb13d93 +Spec: + Expire After: 720h0m0s + Node Class Ref: + Group: + Kind: EC2NodeClass + Name: default + Requirements: + Key: kubernetes.io/arch + Operator: In + Values: + amd64 + Key: kubernetes.io/os + Operator: In + Values: + linux + Key: karpenter.sh/capacity-type + Operator: In + Values: + spot + Key: karpenter.k8s.aws/instance-category + Operator: In + Values: + c + m + r + Key: karpenter.k8s.aws/instance-generation + Operator: Gt + Values: + 2 + Key: karpenter.sh/nodepool + Operator: In + Values: + default + Key: node.kubernetes.io/instance-type + Operator: In + Values: + c3.xlarge + c4.xlarge + c5.2xlarge + c5.xlarge + c5a.xlarge + c5ad.2xlarge + c5ad.xlarge + c5d.2xlarge + Resources: + Requests: + Cpu: 3150m + Pods: 6 + Startup Taints: + Effect: NoSchedule + Key: app.dev/example-startup + Taints: + Effect: NoSchedule + Key: app.dev/example + Termination Grace Period: 1h0m0s +Status: + Allocatable: + Cpu: 7910m + Ephemeral - Storage: 17Gi + Memory: 14162Mi + Pods: 58 + vpc.amazonaws.com/pod-eni: 38 + Capacity: + Cpu: 8 + Ephemeral - Storage: 20Gi + Memory: 15155Mi + Pods: 58 + vpc.amazonaws.com/pod-eni: 38 + Conditions: + Last Transition Time: 2024-08-07T05:38:08Z + Message: + Reason: Consolidatable + Status: True + Type: Consolidatable + Last Transition Time: 2024-08-07T05:38:07Z + Message: + Reason: Initialized + Status: True + Type: Initialized + Last Transition Time: 2024-08-07T05:37:33Z + Message: + Reason: Launched + Status: True + Type: Launched + Last Transition Time: 2024-08-07T05:38:07Z + Message: + Reason: Ready + Status: True + Type: Ready + Last Transition Time: 2024-08-07T05:37:55Z + Message: + Reason: Registered + Status: True + Type: Registered + Image ID: ami-08946d4d49fc3f27b + Node Name: ip-xxx-xxx-xxx-xxx.us-west-2.compute.internal + Provider ID: aws:///us-west-2c/i-01234567890123 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Launched 70s 
karpenter Status condition transitioned, Type: Launched, Status: Unknown -> True, Reason: Launched + Normal DisruptionBlocked 70s karpenter Cannot disrupt NodeClaim: state node doesn't contain both a node and a nodeclaim + Normal Registered 48s karpenter Status condition transitioned, Type: Registered, Status: Unknown -> True, Reason: Registered + Normal Initialized 36s karpenter Status condition transitioned, Type: Initialized, Status: Unknown -> True, Reason: Initialized + Normal Ready 36s karpenter Status condition transitioned, Type: Ready, Status: Unknown -> True, Reason: Ready +``` diff --git a/website/content/en/v0.35/concepts/nodeclasses.md b/website/content/en/v1.0/concepts/nodeclasses.md similarity index 64% rename from website/content/en/v0.35/concepts/nodeclasses.md rename to website/content/en/v1.0/concepts/nodeclasses.md index 5f5324027f48..4749988e69e9 100644 --- a/website/content/en/v0.35/concepts/nodeclasses.md +++ b/website/content/en/v1.0/concepts/nodeclasses.md @@ -1,4 +1,4 @@ - --- +--- title: "NodeClasses" linkTitle: "NodeClasses" weight: 2 @@ -11,7 +11,7 @@ Each NodePool must reference an EC2NodeClass using `spec.template.spec.nodeClass Multiple NodePools may point to the same EC2NodeClass. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -19,15 +19,43 @@ spec: template: spec: nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + group: karpenter.k8s.aws kind: EC2NodeClass name: default --- -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: default spec: + kubelet: + podsPerCore: 2 + maxPods: 20 + systemReserved: + cpu: 100m + memory: 100Mi + ephemeral-storage: 1Gi + kubeReserved: + cpu: 200m + memory: 100Mi + ephemeral-storage: 3Gi + evictionHard: + memory.available: 5% + nodefs.available: 10% + nodefs.inodesFree: 10% + evictionSoft: + memory.available: 500Mi + nodefs.available: 15% + nodefs.inodesFree: 15% + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + evictionMaxPodGracePeriod: 60 + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + cpuCFSQuota: true + clusterDNS: ["10.0.1.100"] # Required, resolves a default ami and userdata amiFamily: AL2 @@ -66,25 +94,20 @@ spec: # Must specify one of "role" or "instanceProfile" for Karpenter to launch nodes instanceProfile: "KarpenterNodeInstanceProfile-${CLUSTER_NAME}" - # Optional, discovers amis to override the amiFamily's default amis # Each term in the array of amiSelectorTerms is ORed together # Within a single term, all conditions are ANDed amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 - - # Optional, use instance-store volumes for node ephemeral-storage - instanceStorePolicy: RAID0 - - # Optional, overrides autogenerated userdata with a merge semantic - userData: | - echo "Hello world" + # Select EKS optimized AL2023 AMIs with version `v20240703`. This term is mutually + # exclusive and can't be specified with other terms. 
+ # - alias: al2023@v20240703 # Optional, propagates tags to underlying EC2 resources tags: @@ -95,7 +118,7 @@ spec: metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled - httpPutResponseHopLimit: 2 + httpPutResponseHopLimit: 1 # This is changed to disable IMDS access from containers not on the host network httpTokens: required # Optional, configures storage devices for the instance @@ -111,6 +134,13 @@ spec: throughput: 125 snapshotID: snap-0123456789 + # Optional, use instance-store volumes for node ephemeral-storage + instanceStorePolicy: RAID0 + + # Optional, overrides autogenerated userdata with a merge semantic + userData: | + echo "Hello world" + # Optional, configures detailed monitoring for the instance detailedMonitoring: true @@ -159,12 +189,244 @@ status: # Generated instance profile name from "role" instanceProfile: "${CLUSTER_NAME}-0123456778901234567789" + conditions: + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: InstanceProfileReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: SubnetsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: SecurityGroupsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: AMIsReady + - lastTransitionTime: "2024-02-02T19:54:34Z" + status: "True" + type: Ready +``` +Refer to the [NodePool docs]({{}}) for settings applicable to all providers. To explore various `EC2NodeClass` configurations, refer to the examples provided [in the Karpenter Github repository](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1/). + + +## spec.kubelet + +Karpenter provides the ability to specify a few additional Kubelet arguments. +These are all optional and provide support for additional customization and use cases. +Adjust these only if you know you need to do so. +For more details on kubelet settings, see the [KubeletConfiguration reference](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/). +The implemented fields are a subset of the full list of upstream kubelet configuration arguments. + +```yaml +kubelet: + podsPerCore: 2 + maxPods: 20 + systemReserved: + cpu: 100m + memory: 100Mi + ephemeral-storage: 1Gi + kubeReserved: + cpu: 200m + memory: 100Mi + ephemeral-storage: 3Gi + evictionHard: + memory.available: 5% + nodefs.available: 10% + nodefs.inodesFree: 10% + evictionSoft: + memory.available: 500Mi + nodefs.available: 15% + nodefs.inodesFree: 15% + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + evictionMaxPodGracePeriod: 60 + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + cpuCFSQuota: true + clusterDNS: ["10.0.1.100"] ``` -Refer to the [NodePool docs]({{}}) for settings applicable to all providers. To explore various `EC2NodeClass` configurations, refer to the examples provided [in the Karpenter Github repository](https://github.com/aws/karpenter/blob/v0.35.0/examples/v1beta1/). + +{{% alert title="Note" color="primary" %}} +If you need to specify a field that isn't present in `spec.kubelet`, you can set it via custom [UserData]({{< ref "#specuserdata" >}}). +For example, if you wanted to configure `maxPods` and `registryPullQPS` you would set the former through `spec.kubelet` and the latter through UserData. 
+The following example achieves this with AL2023:
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+spec:
+  amiSelectorTerms:
+    - alias: al2023@v20240807
+  kubelet:
+    maxPods: 42
+  userData: |
+    apiVersion: node.eks.aws/v1alpha1
+    kind: NodeConfig
+    spec:
+      kubelet:
+        config:
+          # Configured through UserData since unavailable in `spec.kubelet`
+          registryPullQPS: 10
+```
+
+Note that when using the `Custom` AMIFamily, you will need to specify fields **both** in `spec.kubelet` and `spec.userData`.
+{{% /alert %}}
+
+{{% alert title="Warning" color="warning" %}}
+The Bottlerocket AMIFamily does not support the following fields:
+
+* `evictionSoft`
+* `evictionSoftGracePeriod`
+* `evictionMaxPodGracePeriod`
+* `cpuCFSQuota`
+
+If any of these fields are specified on a Bottlerocket EC2NodeClass, they will be omitted from generated UserData and ignored for scheduling purposes.
+Support for these fields can be tracked via GitHub issue [#3722](https://github.com/aws/karpenter-provider-aws/issues/3722).
+{{% /alert %}}
+
+#### Pods Per Core
+
+An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance.
+
+The value generated from `podsPerCore` cannot exceed `maxPods`, meaning, if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling.
+
+{{% alert title="Note" color="primary" %}}
+`maxPods` may not be set in the `kubelet` of an EC2NodeClass, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the EC2NodeClass will not cause unexpected behavior by exceeding the `maxPods` value.
+{{% /alert %}}
+
+#### Max Pods
+
+For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup.
+
+{{% alert title="Note" color="primary" %}}
+When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result.
+{{% /alert %}}
+
+{{% alert title="Windows Support Notice" color="warning" %}}
+Presently, Windows worker nodes do not support using more than one ENI.
+As a consequence, the number of IP addresses, and subsequently, the number of pods that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI.
+Currently, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit.
+{{% /alert %}}
+
+### Reserved Resources
+
+Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf.
These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.kubelet.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.kubelet.kubeReserved` values.
+
+{{% alert title="Note" color="primary" %}}
+Karpenter considers these reserved resources when computing the allocatable ephemeral storage on a given instance type.
+If `kubeReserved` is not specified, Karpenter will compute the default reserved [CPU](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L251) and [memory](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L235) resources for the purpose of ephemeral storage computation.
+These defaults are based on the defaults on Karpenter's supported AMI families, which are not the same as the kubelet defaults.
+You should be aware of the CPU and memory default calculation when using Custom AMI Families. If they don't align, there may be a difference in Karpenter's computed allocatable ephemeral storage and the actual ephemeral storage available on the node.
+{{% /alert %}}
+
+### Eviction Thresholds
+
+The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner.
+
+Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully. They do so by sending a termination signal to pods that are planning to be evicted and allowing those pods to terminate up to their grace period.
+
+Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.kubelet.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.kubelet.evictionSoft` field. `evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values.
+
+```yaml
+kubelet:
+  evictionHard:
+    memory.available: 500Mi
+    nodefs.available: 10%
+    nodefs.inodesFree: 10%
+    imagefs.available: 5%
+    imagefs.inodesFree: 5%
+    pid.available: 7%
+  evictionSoft:
+    memory.available: 1Gi
+    nodefs.available: 15%
+    nodefs.inodesFree: 15%
+    imagefs.available: 10%
+    imagefs.inodesFree: 10%
+    pid.available: 10%
+```
+
+#### Supported Eviction Signals
+
+| Eviction Signal    | Description                                                                      |
+|--------------------|----------------------------------------------------------------------------------|
+| memory.available   | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet |
+| nodefs.available   | nodefs.available := node.stats.fs.available                                      |
+| nodefs.inodesFree  | nodefs.inodesFree := node.stats.fs.inodesFree                                    |
+| imagefs.available  | imagefs.available := node.stats.runtime.imagefs.available                        |
+| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree                      |
+| pid.available      | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc            |
+
+For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs.
+
+#### Soft Eviction Grace Periods
+
+Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and an `evictionSoftGracePeriod[memory.available]` of `1m30`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods.
+
+Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace-owner had specified a pod `terminationGracePeriodInSeconds` on pods in their namespace, the minimum of `evictionPodGracePeriod` and `terminationGracePeriodInSeconds` would be used.
+
+```yaml
+kubelet:
+  evictionSoftGracePeriod:
+    memory.available: 1m
+    nodefs.available: 1m30s
+    nodefs.inodesFree: 2m
+    imagefs.available: 1m30s
+    imagefs.inodesFree: 2m
+    pid.available: 2m
+  evictionMaxPodGracePeriod: 60
+```
+
+### Pod Density
+
+By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more detailed information on these instance types' limits.
+
+{{% alert title="Note" color="primary" %}}
+By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments will be used for the node and cannot share the allocated IPs on the interface to pods.
Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pods interfaces through the global `RESERVED_ENIS` [setting](./settings.md). In the common case, `RESERVED_ENIS` should be set to `"1"` if using Custom Networking.
+{{% /alert %}}
+
+{{% alert title="Windows Support Notice" color="warning" %}}
+It's currently not possible to specify custom networking with Windows nodes.
+{{% /alert %}}
 
 ## spec.amiFamily
 
-AMIFamily is a required field, dictating both the default bootstrapping logic for nodes provisioned through this `EC2NodeClass` but also selecting a group of recommended, latest AMIs by default. Currently, Karpenter supports `amiFamily` values `AL2`, `AL2023`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022` and `Custom`. GPUs are only supported by default with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify custom [`amiSelectorTerms`]({{}}). Default bootstrapping logic is shown below for each of the supported families.
+AMIFamily dictates the default bootstrapping logic for nodes provisioned through this `EC2NodeClass`.
+An `amiFamily` is only required if you don't specify a `spec.amiSelectorTerms.alias` object.
+For example, if you specify `alias: al2023@v20240807`, the `amiFamily` is implicitly `AL2023`.
+
+AMIFamily does not impact which AMI is discovered, only the UserData generation and default BlockDeviceMappings. To automatically discover EKS optimized AMIs, use the new [`alias` field in amiSelectorTerms]({{< ref "#specamiselectorterms" >}}).
+
+{{% alert title="Ubuntu Support Dropped at v1" color="warning" %}}
+
+Support for the Ubuntu AMIFamily has been dropped at Karpenter `v1.0.0`.
+This means Karpenter no longer supports automatic AMI discovery and UserData generation for Ubuntu.
+To continue using Ubuntu AMIs, you will need to select Ubuntu AMIs using `amiSelectorTerms`.
+
+Additionally, you will need to either maintain UserData yourself using the `Custom` AMIFamily, or you can use the `AL2` AMIFamily and custom `blockDeviceMappings` (as shown below).
+The `AL2` family has an identical UserData format, but this compatibility isn't guaranteed long term.
+Changes to AL2's or Ubuntu's UserData format could result in incompatibility, at which point the `Custom` AMIFamily must be used.
+ +**Ubuntu NodeClass Example:** +```yaml +apiVersion: karpenter.k8s.aws/v1 +kind: EC2NodeClass +spec: + amiFamily: AL2 + amiSelectorTerms: + - id: ami-placeholder + blockDeviceMappings: + - deviceName: '/dev/sda1' + rootVolume: true + ebs: + encrypted: true + volumeType: gp3 + volumeSize: 20Gi +``` + +{{% /alert %}} + ### AL2 @@ -228,24 +490,6 @@ max-pods = 110 'karpenter.sh/nodepool' = 'test' ``` -### Ubuntu - -```bash -MIME-Version: 1.0 -Content-Type: multipart/mixed; boundary="//" - ---// -Content-Type: text/x-shellscript; charset="us-ascii" - -#!/bin/bash -xe -exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 -/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \ ---dns-cluster-ip '10.100.0.10' \ ---use-max-pods false \ ---kubelet-extra-args '--node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test" --max-pods=110' ---//-- -``` - ### Windows2019 ```powershell @@ -264,13 +508,9 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 ``` -{{% alert title="Note" color="primary" %}} -Karpenter will automatically query for the appropriate [EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) via AWS Systems Manager (SSM). In the case of the `Custom` AMIFamily, no default AMIs are defined. As a result, `amiSelectorTerms` must be specified to inform Karpenter on which custom AMIs are to be used. -{{% /alert %}} - ### Custom -The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families. +The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families. For this AMIFamily, kubelet must add the taint `karpenter.sh/unregistered:NoExecute` via the `--register-with-taints` flag ([flags](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)) or the KubeletConfiguration spec ([options](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/#kubelet-config-k8s-io-v1-CredentialProviderConfig) and [docs](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)). Karpenter will fail to register nodes that do not have this taint. ## spec.subnetSelectorTerms @@ -427,29 +667,72 @@ spec: - id: "sg-06e0cf9c198874591" ``` -## spec.amiSelectorTerms +## spec.role + +`Role` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If using the [Karpenter Getting Started Guide]({{}}) to deploy Karpenter, you can use the `KarpenterNodeRole-$CLUSTER_NAME` role provisioned by that process. -AMI Selector Terms are used to configure custom AMIs for Karpenter to use, where the AMIs are discovered through ids, owners, name, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). **When you specify `amiSelectorTerms`, you fully override the default AMIs that are selected on by your EC2NodeClass [`amiFamily`]({{< ref "#specamifamily" >}}).** +```yaml +spec: + role: "KarpenterNodeRole-$CLUSTER_NAME" +``` + +## spec.instanceProfile + +`InstanceProfile` is an optional field and tells Karpenter which IAM identity nodes should assume. 
You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If you use the `instanceProfile` field instead of `role`, Karpenter will not manage the InstanceProfile on your behalf; instead, it expects that you have pre-provisioned an IAM instance profile and assigned it a role. + +You can provision and assign a role to an IAM instance profile using [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) or by using the [`aws iam create-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-instance-profile.html) and [`aws iam add-role-to-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/add-role-to-instance-profile.html) commands in the CLI. {{% alert title="Note" color="primary" %}} -[`amiFamily`]({{< ref "#specamifamily" >}}) determines the bootstrapping mode, while `amiSelectorTerms` specifies specific AMIs to be used. Therefore, you need to ensure consistency between [`amiFamily`]({{< ref "#specamifamily" >}}) and `amiSelectorTerms` to avoid conflicts during bootstrapping. + +For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) that do not have access to the public internet, using `spec.instanceProfile` is required. `spec.role` cannot be used since Karpenter needs to access IAM endpoints to manage a generated instance profile. IAM [doesn't support private endpoints](https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html) to enable accessing the service without going to the public internet. + {{% /alert %}} -This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different AMIs that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled. +## spec.amiSelectorTerms + +AMI Selector Terms are __required__ and are used to configure AMIs for Karpenter to use. AMIs are discovered through alias, id, owner, name, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). + +This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. +Effectively, all requirements within a single term are ANDed together. +It's possible that you may want to select on two different AMIs that have unrelated requirements. +In this case, you can specify multiple terms which will be ORed together to form your selection logic. +The example below shows how this selection logic is fulfilled. ```yaml amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name - # OR any AMI with ID "ami-123" + # Select on any AMI that has both the `karpenter.sh/discovery: ${CLUSTER_NAME}` + # AND `environment: test` tags OR any AMI with the name `my-ami` OR an AMI with + # ID `ami-123` - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 + # Select EKS optimized AL2023 AMIs with version `v20240807`. This term is mutually + # exclusive and can't be specified with other terms. 
+  # - alias: al2023@v20240807
 ```
 
-This field is optional, and Karpenter will use the latest EKS-optimized AMIs for the AMIFamily if no amiSelectorTerms are specified. To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To ensure that AMIs are owned by the expected owner, use the `owner` field - you can use a combination of account aliases (e.g. `self` `amazon`, `your-aws-account-name`) and account IDs.
+An `alias` term can be used to select EKS-optimized AMIs. An `alias` is formatted as `family@version`. Family can be one of the following values:
+
+* `al2`
+* `al2023`
+* `bottlerocket`
+* `windows2019`
+* `windows2022`
+
+The version string can be set to `latest`, or pinned to a specific AMI using the format of that AMI's GitHub release tags.
+For example, AL2 and AL2023 use dates for their release, so they can be pinned as follows:
+```yaml
+alias: al2023@v20240703
+```
+Bottlerocket uses a semantic version for its releases. You can pin Bottlerocket as follows:
+```yaml
+alias: bottlerocket@v1.20.4
+```
+The Windows family does not support pinning, so only `latest` is supported.
+
+To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To select AMIs that are not owned by `amazon` or the account that Karpenter is running in, use the `owner` field - you can use a combination of account aliases (e.g. `self`, `amazon`, `your-aws-account-name`) and account IDs.
 
 If owner is not set for `name`, it defaults to `self,amazon`, preventing Karpenter from inadvertently selecting an AMI that is owned by a different account. Tags don't require an owner as tags can only be discovered by the user who created them.
 
@@ -461,14 +744,21 @@ AMIs may be specified by any AWS tag, including `Name`. Selecting by tag or by n
 If `amiSelectorTerms` match more than one AMI, Karpenter will automatically determine which AMI best fits the workloads on the launched worker node under the following constraints:
 
 * When launching nodes, Karpenter automatically determines which architecture a custom AMI is compatible with and will use images that match an instanceType's requirements.
-  * Note that Karpenter **cannot** detect any requirement other than architecture. If you need to specify different AMIs for different kind of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`.
+  * Unless using an alias, Karpenter **cannot** detect requirements other than architecture. If you need to specify different AMIs for different kinds of nodes (e.g. accelerated GPU AMIs), you should use a separate `EC2NodeClass`.
 * If multiple AMIs are found that can be used, Karpenter will choose the latest one.
 * If no AMIs are found that can be used, then no nodes will be provisioned.
 {{% /alert %}}
 
 #### Examples
 
+Select by AMI family and version:
+```yaml
+  amiSelectorTerms:
+    - alias: al2023@v20240807
+```
+
 Select all with a specified tag:
+
 ```yaml
   amiSelectorTerms:
     - tags:
@@ -519,27 +809,6 @@ Specify using ids:
     - id: "ami-456"
 ```
 
-## spec.role
-
-`Role` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If using the [Karpenter Getting Started Guide]({{}}) to deploy Karpenter, you can use the `KarpenterNodeRole-$CLUSTER_NAME` role provisioned by that process.
- -```yaml -spec: - role: "KarpenterNodeRole-$CLUSTER_NAME" -``` - -## spec.instanceProfile - -`InstanceProfile` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If you use the `instanceProfile` field instead of `role`, Karpenter will not manage the InstanceProfile on your behalf; instead, it expects that you have pre-provisioned an IAM instance profile and assigned it a role. - -You can provision and assign a role to an IAM instance profile using [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) or by using the [`aws iam create-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-instance-profile.html) and [`aws iam add-role-to-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/add-role-to-instance-profile.html) commands in the CLI. - -{{% alert title="Note" color="primary" %}} - -For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) that do not have access to the public internet, using `spec.instanceProfile` is required. `spec.role` cannot be used since Karpenter needs to access IAM endpoints to manage a generated instance profile. IAM [doesn't support private endpoints](https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html) to enable accessing the service without going to the public internet. - -{{% /alert %}} - ## spec.tags Karpenter adds tags to all resources it creates, including EC2 Instances, EBS volumes, and Launch Templates. The default set of tags are listed below. @@ -550,6 +819,7 @@ karpenter.sh/nodeclaim: karpenter.sh/nodepool: karpenter.k8s.aws/ec2nodeclass: kubernetes.io/cluster/: owned +eks:eks-cluster-name: ``` Additional tags can be added in the tags section, which will be merged with the default tags specified above. @@ -578,7 +848,7 @@ spec: metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled - httpPutResponseHopLimit: 2 + httpPutResponseHopLimit: 1 httpTokens: required ``` @@ -643,17 +913,6 @@ spec: encrypted: true ``` -### Ubuntu -```yaml -spec: - blockDeviceMappings: - - deviceName: /dev/sda1 - ebs: - volumeSize: 20Gi - volumeType: gp3 - encrypted: true -``` - ### Windows2019/Windows2022 ```yaml spec: @@ -707,7 +966,7 @@ Since the Kubelet & Containerd will be using the instance-store filesystem, you You can control the UserData that is applied to your worker nodes via this field. This allows you to run custom scripts or pass-through custom configuration to Karpenter instances on start-up. ```yaml -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: bottlerocket-example @@ -731,7 +990,7 @@ See [Node NotReady]({{< ref "../troubleshooting/#node-notready" >}}) troubleshoo {{% /alert %}} ```yaml -apiVersion: karpenter.k8s.aws/v1beta1 +apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: al2-example @@ -749,11 +1008,11 @@ spec: chown -R ec2-user ~ec2-user/.ssh ``` -For more examples on configuring fields for different AMI families, see the [examples here](https://github.com/aws/karpenter/blob/v0.35.0/examples/v1beta1/). +For more examples on configuring fields for different AMI families, see the [examples here](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1). Karpenter will merge the userData you specify with the default userData for that AMIFamily. 
See the [AMIFamily]({{< ref "#specamifamily" >}}) section for more details on these defaults. View the sections below to understand the different merge strategies for each AMIFamily.
 
-### AL2/Ubuntu
+### AL2
 
 * Your UserData can be in the [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) format.
 * Karpenter will transform your custom user-data as a MIME part, if necessary, and then merge a final MIME part to the end of your UserData parts which will bootstrap the worker node. Karpenter will have full control over all the parameters being passed to the bootstrap script.
@@ -829,16 +1088,15 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
 --//--
 ```
 
-{{% alert title="Note" color="primary" %}}
-You can also set kubelet-config properties by modifying the kubelet-config.json file before the EKS bootstrap script starts the kubelet:
+{{% alert title="Tip" color="secondary" %}}
+You can set additional kubelet configuration properties, unavailable through `spec.kubelet`, by updating the `kubelet-config.json` file:
 
 ```yaml
-apiVersion: karpenter.k8s.aws/v1beta1
+apiVersion: karpenter.k8s.aws/v1
 kind: EC2NodeClass
 metadata:
   name: kubelet-config-example
 spec:
-  ...
   amiFamily: AL2
   userData: |
     #!/bin/bash
@@ -850,7 +1108,12 @@ spec:
 
 * Your UserData may be in one of three formats: a [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive), a NodeConfig YAML / JSON string, or a shell script.
 * Karpenter will transform your custom UserData into a MIME part, if necessary, and then create a MIME multi-part archive. This archive will consist of a generated NodeConfig, containing Karpenter's default values, followed by the transformed custom UserData. For more information on the NodeConfig spec, refer to the [AL2023 EKS Optimized AMI docs](https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/examples/).
-* If a value is specified both in the Karpenter generated NodeConfig and the same value is specified in the custom user data, the value in the custom user data will take precedence.
+
+{{% alert title="Warning" color="warning" %}}
+Any values configured by the Karpenter generated NodeConfig object will take precedence over values specified in `spec.userData`.
+This includes cluster name, cluster CIDR, cluster endpoint, certificate authority, taints, labels, and any value in [spec.kubelet]({{< ref "#speckubelet" >}}).
+These fields must be configured natively through Karpenter rather than through UserData.
+{{% /alert %}}
 
 #### Passed-in UserData (NodeConfig)
 
@@ -870,7 +1133,16 @@ MIME-Version: 1.0
 Content-Type: multipart/mixed; boundary="//"
 
 --//
-# Karpenter Generated NodeConfig
+Content-Type: application/node.eks.aws
+
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+
+--//
 Content-Type: application/node.eks.aws
 
 # Karpenter Generated NodeConfig
@@ -890,15 +1162,6 @@ spec:
       flags:
         - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
 
---//
-Content-Type: application/node.eks.aws
-
-apiVersion: node.eks.aws/v1alpha1
-kind: NodeConfig
-spec:
-  kubelet:
-    config:
-      maxPods: 42
 --//--
 ```
 
@@ -915,6 +1178,12 @@ echo "Hello, AL2023!"
 MIME-Version: 1.0
 Content-Type: multipart/mixed; boundary="//"
 
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+
 --//
 Content-Type: application/node.eks.aws
 
@@ -935,11 +1204,6 @@ spec:
       flags:
         - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
 
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-
-#!/bin/bash
-echo "Hello, AL2023!"
 --//--
 ```
 
@@ -949,6 +1213,12 @@ echo "Hello, AL2023!"
 MIME-Version: 1.0
 Content-Type: multipart/mixed; boundary="//"
 
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+
 --//
 Content-Type: application/node.eks.aws
 
@@ -959,11 +1229,6 @@ spec:
       config:
         maxPods: 42
 --//
-Content-Type: text/x-shellscript; charset="us-ascii"
-
-#!/bin/bash
-echo "Hello, AL2023!"
---//
 ```
 
 #### Merged UserData (MIME)
 
@@ -975,6 +1240,21 @@ Content-Type: multipart/mixed; boundary="//"
 
 --//
 Content-Type: application/node.eks.aws
 
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+
+--//
+Content-Type: application/node.eks.aws
+
 # Karpenter Generated NodeConfig
 apiVersion: node.eks.aws/v1alpha1
 kind: NodeConfig
@@ -992,32 +1272,20 @@ spec:
       flags:
         - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
 
---//
-Content-Type: application/node.eks.aws
-
-apiVersion: node.eks.aws/v1alpha1
-kind: NodeConfig
-spec:
-  kubelet:
-    config:
-      maxPods: 42
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-
-#!/bin/bash
-echo "Hello, AL2023!"
 --//--
 ```
 
 ### Bottlerocket
 
 * Your UserData must be valid TOML.
-* Karpenter will automatically merge settings to ensure successful bootstrap including `cluster-name`, `api-server` and `cluster-certificate`. Any labels and taints that need to be set based on pod requirements will also be specified in the final merged UserData.
-  * All Kubelet settings that Karpenter applies will override the corresponding settings in the provided UserData. For example, if you've specified `settings.kubernetes.cluster-name`, it will be overridden.
-  * If MaxPods is specified via the binary arg to Karpenter, the value will override anything specified in the UserData.
-  * If ClusterDNS is specified via `spec.kubeletConfiguration`, then that value will override anything specified in the UserData.
 * Unknown TOML fields will be ignored when the final merged UserData is generated by Karpenter.
 
+{{% alert title="Warning" color="warning" %}}
+Any values configured by Karpenter will take precedence over values specified in `spec.userData`.
+This includes cluster name, cluster endpoint, cluster certificate, taints, labels, and any value in [spec.kubelet]({{< ref "#speckubelet" >}}).
+These fields must be configured natively through Karpenter rather than through UserData.
+{{% /alert %}}
+
 Consider the following example to understand how your custom UserData settings will be merged in.
 
 #### Passed-in UserData
@@ -1091,6 +1359,9 @@ spec:
 
 ### Custom
 
 * No merging is performed, your UserData must perform all setup required of the node to allow it to join the cluster.
+* Custom UserData must meet the following requirements to work correctly with Karpenter: + * It must ensure the node is registered with the `karpenter.sh/unregistered:NoExecute` taint (via kubelet configuration field `registerWithTaints`) + * It must set kubelet config options to match those configured in `spec.kubelet` ## spec.detailedMonitoring @@ -1103,7 +1374,10 @@ spec: ## spec.associatePublicIPAddress -A boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected. +You can explicitly set `AssociatePublicIPAddress: false` when you are only launching into private subnets. +Previously, Karpenter auto-set `associatePublicIPAddress` on the primary ENI to false if a user’s subnet options were all private subnets. +This value is a boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected. + {{% alert title="Note" color="warning" %}} If a `NodeClaim` requests `vpc.amazonaws.com/efa` resources, `spec.associatePublicIPAddress` is respected. However, if this `NodeClaim` requests **multiple** EFA resources and the value for `spec.associatePublicIPAddress` is true, the instance will fail to launch. This is due to an EC2 restriction which @@ -1161,11 +1435,12 @@ status: #### Examples -Default AMIs resolved from the AL2 AMIFamily: +AMIs resolved with an AL2 alias: ```yaml spec: - amiFamily: AL2 + amiSelectorTerms: + - alias: al2@v20240807 status: amis: - id: ami-03c3a3dcda64f5b75 @@ -1210,11 +1485,10 @@ status: operator: DoesNotExist ``` -AMIs resolved from [`spec.amiSelectorTerms`]({{< ref "#specamiselectorterms" >}}): +AMIs resolved from tags: ```yaml spec: - amiFamily: AL2 amiSelectorTerms: - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -1246,3 +1520,33 @@ spec: status: instanceProfile: "${CLUSTER_NAME}-0123456778901234567789" ``` + +## status.conditions + +[`status.conditions`]({{< ref "#statusconditions" >}}) indicates EC2NodeClass readiness. This will be `Ready` when Karpenter successfully discovers AMIs, Instance Profile, Subnets, Cluster CIDR (AL2023 only) and SecurityGroups for the EC2NodeClass. + +```yaml +spec: + role: "KarpenterNodeRole-${CLUSTER_NAME}" +status: + conditions: + Last Transition Time: 2024-05-06T06:04:45Z + Message: Ready + Reason: Ready + Status: True + Type: Ready +``` + +If any of the underlying conditions are not resolved then `Status` is `False` and `Message` indicates the dependency that was not resolved. 
+ +```yaml +spec: + role: "KarpenterNodeRole-${CLUSTER_NAME}" +status: + conditions: + Last Transition Time: 2024-05-06T06:19:46Z + Message: unable to resolve instance profile for node class + Reason: NodeClassNotReady + Status: False + Type: Ready +``` diff --git a/website/content/en/v0.35/concepts/nodepools.md b/website/content/en/v1.0/concepts/nodepools.md similarity index 55% rename from website/content/en/v0.35/concepts/nodepools.md rename to website/content/en/v1.0/concepts/nodepools.md index 03cc87370502..69198bb0a6b7 100644 --- a/website/content/en/v0.35/concepts/nodepools.md +++ b/website/content/en/v1.0/concepts/nodepools.md @@ -1,7 +1,7 @@ --- title: "NodePools" linkTitle: "NodePools" -weight: 1 +weight: 10 description: > Configure Karpenter with NodePools --- @@ -22,10 +22,15 @@ Here are things you should know about NodePools: * If Karpenter encounters a startup taint in the NodePool it will be applied to nodes that are provisioned, but pods do not need to tolerate the taint. Karpenter assumes that the taint is temporary and some other system will remove the taint. * It is recommended to create NodePools that are mutually exclusive. So no Pod should match multiple NodePools. If multiple NodePools are matched, Karpenter will use the NodePool with the highest [weight](#specweight). -For some example `NodePool` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/v0.35.0/examples/v1beta1/). + +{{% alert title="Note" color="primary" %}} +Objects for setting Kubelet features have been moved from the NodePool spec to the EC2NodeClasses spec, to not require other Karpenter providers to support those features. +{{% /alert %}} + +For some example `NodePool` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/v1.0.0/examples/v1/). ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -46,7 +51,7 @@ spec: spec: # References the Cloud Provider's NodeClass resource, see your cloud provider specific documentation nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + group: karpenter.k8s.aws # Updated since only a single version will be served kind: EC2NodeClass name: default @@ -63,6 +68,21 @@ spec: - key: example.com/another-taint effect: NoSchedule + # The amount of time a Node can live on the cluster before being removed + # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes + # You can choose to disable expiration entirely by setting the string value 'Never' here + + # Note: changing this value in the nodepool will drift the nodeclaims. + expireAfter: 720h | Never + + # The amount of time that a node can be draining before it's forcibly deleted. A node begins draining when a delete call is made against it, starting + # its finalization flow. Pods with TerminationGracePeriodSeconds will be deleted preemptively before this terminationGracePeriod ends to give as much time to cleanup as possible. + # If your pod's terminationGracePeriodSeconds is larger than this terminationGracePeriod, Karpenter may forcibly delete the pod + # before it has its full terminationGracePeriod to cleanup. + + # Note: changing this value in the nodepool will drift the nodeclaims. + terminationGracePeriod: 48h + # Requirements that constrain the parameters of provisioned nodes. 
# These requirements are combined with pod.spec.topologySpreadConstraints, pod.spec.affinity.nodeAffinity, pod.spec.affinity.podAffinity, and pod.spec.nodeSelector rules. # Operators { In, NotIn, Exists, DoesNotExist, Gt, and Lt } are supported. @@ -72,7 +92,7 @@ spec: operator: In values: ["c", "m", "r"] # minValues here enforces the scheduler to consider at least that number of unique instance-category to schedule the pods. - # This field is ALPHA and can be dropped or replaced at any time + # This field is ALPHA and can be dropped or replaced at any time minValues: 2 - key: "karpenter.k8s.aws/instance-family" operator: In @@ -97,55 +117,18 @@ spec: operator: In values: ["spot", "on-demand"] - # Karpenter provides the ability to specify a few additional Kubelet args. - # These are all optional and provide support for additional customization and use cases. - kubelet: - clusterDNS: ["10.0.1.100"] - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 - # Disruption section which describes the ways in which Karpenter can disrupt and replace Nodes # Configuration in this section constrains how aggressive Karpenter can be with performing operations # like rolling Nodes due to them hitting their maximum lifetime (expiry) or scaling down nodes to reduce cluster cost disruption: # Describes which types of Nodes Karpenter should consider for consolidation - # If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost + # If using 'WhenEmptyOrUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is empty or underutilized and could be changed to reduce cost # If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods - consolidationPolicy: WhenUnderutilized | WhenEmpty + consolidationPolicy: WhenEmptyOrUnderutilized | WhenEmpty - # The amount of time Karpenter should wait after discovering a consolidation decision - # This value can currently only be set when the consolidationPolicy is 'WhenEmpty' + # The amount of time Karpenter should wait to consolidate a node after a pod has been added or removed from the node. # You can choose to disable consolidation entirely by setting the string value 'Never' here - consolidateAfter: 30s - - # The amount of time a Node can live on the cluster before being removed - # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes - # You can choose to disable expiration entirely by setting the string value 'Never' here - expireAfter: 720h + consolidateAfter: 1m | Never # Added to allow additional control over consolidation aggressiveness # Budgets control the speed Karpenter can scale down nodes. 
     # Karpenter will respect the minimum of the currently active budgets, and will round up
@@ -167,7 +150,49 @@ spec:
   # to select. Higher weights indicate higher priority when comparing NodePools.
   # Specifying no weight is equivalent to specifying a weight of 0.
   weight: 10
+status:
+  conditions:
+    - type: Initialized
+      status: "False"
+      observedGeneration: 1
+      lastTransitionTime: "2024-02-02T19:54:34Z"
+      reason: NodeClaimNotLaunched
+      message: "NodeClaim hasn't succeeded launch"
+  resources:
+    cpu: "20"
+    memory: "8192Mi"
+    ephemeral-storage: "100Gi"
 ```
+## metadata.name
+The name of the NodePool.
+
+## spec.template.metadata.labels
+Arbitrary key/value pairs to apply to all nodes.
+
+## spec.template.metadata.annotations
+Arbitrary key/value pairs to apply to all nodes.
+
+## spec.template.spec.nodeClassRef
+
+This field points to the Cloud Provider NodeClass resource. See [EC2NodeClasses]({{}}) for details.
+
+## spec.template.spec.taints
+
+Taints to add to provisioned nodes. Pods that don't tolerate those taints could be prevented from scheduling.
+See [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for details.
+
+## spec.template.spec.startupTaints
+
+Taints that are added to nodes to indicate that a certain condition must be met, such as starting an agent or setting up networking, before the node can be initialized.
+These taints must be cleared before pods can be deployed to a node.
+
+## spec.template.spec.expireAfter
+
+The amount of time a Node can live on the cluster before being deleted by Karpenter. Nodes will begin draining once their expiration has been hit.
+
+## spec.template.spec.terminationGracePeriod
+
+The amount of time a Node can be draining before Karpenter forcibly cleans up the node. Pods blocking eviction like PDBs and do-not-disrupt will be respected during draining until the `terminationGracePeriod` is reached, where those pods will be forcibly deleted.
 
 ## spec.template.spec.requirements
 
@@ -234,7 +259,7 @@ Karpenter prioritizes Spot offerings if the NodePool allows Spot and on-demand i
 Karpenter also allows `karpenter.sh/capacity-type` to be used as a topology key for enforcing topology-spread.
 
 {{% alert title="Note" color="primary" %}}
-There is currently a limit of 30 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 30 requirements and labels combined set on your NodePool.
+There is currently a limit of 100 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 100 requirements and labels combined set on your NodePool.
 {{% /alert %}}
 
 ### Min Values
@@ -331,157 +356,12 @@ spec:
 
 {{% /alert %}}
 
-## spec.template.spec.nodeClassRef
-
-This field points to the Cloud Provider NodeClass resource. Learn more about [EC2NodeClasses]({{}}).
-
-## spec.template.spec.kubelet
-
-Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for
-additional customization and use cases. Adjust these only if you know you need to do so.
For more details on kubelet configuration arguments, [see the KubeletConfiguration API specification docs](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). The implemented fields are a subset of the full list of upstream kubelet configuration arguments. Please cut an issue if you'd like to see another field implemented. - -```yaml -kubelet: - clusterDNS: ["10.0.1.100"] - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 -``` - -### Reserved Resources - -Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf. These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.template.spec.kubelet.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.template.spec.kubelet.kubeReserved` values. - -{{% alert title="Note" color="primary" %}} -Karpenter considers these reserved resources when computing the allocatable ephemeral storage on a given instance type. -If `kubeReserved` is not specified, Karpenter will compute the default reserved [CPU](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L251) and [memory](https://github.com/awslabs/amazon-eks-ami/blob/db28da15d2b696bc08ac3aacc9675694f4a69933/files/bootstrap.sh#L235) resources for the purpose of ephemeral storage computation. -These defaults are based on the defaults on Karpenter's supported AMI families, which are not the same as the kubelet defaults. -You should be aware of the CPU and memory default calculation when using Custom AMI Families. If they don't align, there may be a difference in Karpenter's computed allocatable ephemeral storage and the actually ephemeral storage available on the node. -{{% /alert %}} - -### Eviction Thresholds - -The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner. - -Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully. They do so by sending a termination signal to pods that are planning to be evicted and allowing those pods to terminate up to their grace period. 
- -Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionSoft` field. `evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values. - -```yaml -kubelet: - evictionHard: - memory.available: 500Mi - nodefs.available: 10% - nodefs.inodesFree: 10% - imagefs.available: 5% - imagefs.inodesFree: 5% - pid.available: 7% - evictionSoft: - memory.available: 1Gi - nodefs.available: 15% - nodefs.inodesFree: 15% - imagefs.available: 10% - imagefs.inodesFree: 10% - pid.available: 10% -``` - -#### Supported Eviction Signals - -| Eviction Signal | Description | -|--------------------|---------------------------------------------------------------------------------| -| memory.available | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet | -| nodefs.available | nodefs.available := node.stats.fs.available | -| nodefs.inodesFree | nodefs.inodesFree := node.stats.fs.inodesFree | -| imagefs.available | imagefs.available := node.stats.runtime.imagefs.available | -| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree | -| pid.available | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc | - -For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs. - -#### Soft Eviction Grace Periods - -Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and a `evictionSoftGracePeriod[memory.available]` of `1m30`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods. - -Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace-owner had specified a pod `terminationGracePeriodInSeconds` on pods in their namespace, the minimum of `evictionPodGracePeriod` and `terminationGracePeriodInSeconds` would be used. - -```yaml -kubelet: - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - imagefs.available: 1m30s - imagefs.inodesFree: 2m - pid.available: 2m - evictionMaxPodGracePeriod: 60 -``` - -### Pod Density - -By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for a more detailed information on these instance types' limits. 
- -{{% alert title="Note" color="primary" %}} -By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments will be used for the node and cannot share the allocated IPs on the interface to pods. Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pods interfaces through a global [setting](./settings.md#configmap) within the karpenter-global-settings configmap: `aws.reservedENIs`. In the common case, `aws.reservedENIs` should be set to `"1"` if using Custom Networking. -{{% /alert %}} - -{{% alert title="Windows Support Notice" color="warning" %}} -It's currently not possible to specify custom networking with Windows nodes. -{{% /alert %}} - -#### Max Pods - -For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.template.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup. - -{{% alert title="Note" color="primary" %}} -When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result. -{{% /alert %}} - -{{% alert title="Windows Support Notice" color="warning" %}} -Presently, Windows worker nodes do not support using more than one ENI. -As a consequence, the number of IP addresses, and subsequently, the number of pods that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI. -Currently, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit. -{{% /alert %}} - -#### Pods Per Core - -An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.template.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance. - -The value generated from `podsPerCore` cannot exceed `maxPods`, meaning, if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling. - -{{% alert title="Note" color="primary" %}} -`maxPods` may not be set in the `kubelet` of a NodePool, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the NodePool will not cause unexpected behavior by exceeding the `maxPods` value. 
-{{% /alert %}} - -{{% alert title="Pods Per Core on Bottlerocket" color="warning" %}} -Bottlerocket AMIFamily currently does not support `podsPerCore` configuration. If a NodePool contains a `provider` or `providerRef` to a node template that will launch a Bottlerocket instance, the `podsPerCore` value will be ignored for scheduling and for configuring the kubelet. -{{% /alert %}} ## spec.disruption -You can configure Karpenter to disrupt Nodes through your NodePool in multiple ways. You can use `spec.disruption.consolidationPolicy`, `spec.disruption.consolidateAfter` or `spec.disruption.expireAfter`. Read [Disruption]({{}}) for more. +You can configure Karpenter to disrupt Nodes through your NodePool in multiple ways. You can use `spec.disruption.consolidationPolicy`, `spec.disruption.consolidateAfter`, or `spec.template.spec.expireAfter`. +You can also rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. +Read [Disruption]({{}}) for more. ## spec.limits @@ -490,7 +370,7 @@ The NodePool spec includes a limits section (`spec.limits`), which constrains th Karpenter supports limits of any resource type reported by your cloudprovider. It limits instance types when scheduling to those that will not exceed the specified limits. If a limit has been exceeded, nodes provisioning is prevented until some nodes have been terminated. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -528,6 +408,18 @@ Karpenter allows you to describe NodePool preferences through a `weight` mechani For more information on weighting NodePools, see the [Weighted NodePools section]({{}}) in the scheduling docs. +## status.conditions +[Conditions](https://github.com/kubernetes/apimachinery/blob/f14778da5523847e4c07346e3161a4b4f6c9186e/pkg/apis/meta/v1/types.go#L1523) objects add observability features to Karpenter. +* The `status.conditions.type` object reflects node status, such as `Initialized` or `Available`. +* The status of the condition, `status.conditions.status`, indicates if the condition is `True` or `False`. +* The `status.conditions.observedGeneration` indicates if the instance is out of date with the current state of `.metadata.generation`. +* The `status.conditions.lastTransitionTime` object records the time of the condition's most recent transition. +* The `status.conditions.reason` object indicates the reason for the condition's previous transition. +* The `status.conditions.message` object provides human-readable details about the condition's previous transition. + +## status.resources +Objects under `status.resources` provide information about the status of resources such as `cpu`, `memory`, and `ephemeral-storage`. + ## Examples ### Isolating Expensive Hardware A NodePool can be set up to only provision nodes on particular processor types.
The following example sets a taint that only allows pods with tolerations for Nvidia GPUs to be scheduled: ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: gpu spec: disruption: - consolidationPolicy: WhenUnderutilized + consolidationPolicy: WhenEmptyOrUnderutilized template: spec: requirements: @@ -560,14 +452,16 @@ In order for a pod to run on a node defined in this NodePool, it must tolerate ` Per the Cilium [docs](https://docs.cilium.io/en/stable/installation/taints/#taint-effects), it's recommended to place a taint of `node.cilium.io/agent-not-ready=true:NoExecute` on nodes to allow Cilium to configure networking prior to other pods starting. This can be accomplished via the use of Karpenter `startupTaints`. These taints are placed on the node, but pods aren't required to tolerate these taints to be considered for provisioning. +Failure to provide accurate `startupTaints` can result in Karpenter continually provisioning new nodes. When the new node joins and the startup taint that Karpenter is unaware of is added, Karpenter now considers the pending pod to be unschedulable to this node. Karpenter will attempt to provision yet another new node to schedule the pending pod. + ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: cilium-startup spec: disruption: - consolidationPolicy: WhenUnderutilized + consolidationPolicy: WhenEmptyOrUnderutilized template: spec: startupTaints: diff --git a/website/content/en/v0.35/concepts/scheduling.md b/website/content/en/v1.0/concepts/scheduling.md similarity index 91% rename from website/content/en/v0.35/concepts/scheduling.md rename to website/content/en/v1.0/concepts/scheduling.md index 0674870f5b07..2ef5b2c62897 100755 --- a/website/content/en/v0.35/concepts/scheduling.md +++ b/website/content/en/v1.0/concepts/scheduling.md @@ -1,7 +1,7 @@ --- title: "Scheduling" linkTitle: "Scheduling" -weight: 3 +weight: 40 description: > Learn about scheduling workloads with Karpenter --- @@ -150,7 +150,9 @@ Take care to ensure the label domains are correct. 
A well known label like `karp | karpenter.k8s.aws/instance-family | g4dn | [AWS Specific] Instance types of similar properties but different resource quantities | | karpenter.k8s.aws/instance-size | 8xlarge | [AWS Specific] Instance types of similar resource quantities but different properties | | karpenter.k8s.aws/instance-cpu | 32 | [AWS Specific] Number of CPUs on the instance | +| karpenter.k8s.aws/instance-cpu-manufacturer | aws | [AWS Specific] Name of the CPU manufacturer | | karpenter.k8s.aws/instance-memory | 131072 | [AWS Specific] Number of mebibytes of memory on the instance | +| karpenter.k8s.aws/instance-ebs-bandwidth | 9500 | [AWS Specific] Number of [maximum megabits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html#ebs-optimization-performance) of EBS available on the instance | | karpenter.k8s.aws/instance-network-bandwidth | 131072 | [AWS Specific] Number of [baseline megabits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html) available on the instance | | karpenter.k8s.aws/instance-pods | 110 | [AWS Specific] Number of pods the instance supports | | karpenter.k8s.aws/instance-gpu-name | t4 | [AWS Specific] Name of the GPU on the instance, if available | @@ -172,8 +174,9 @@ requirements: - key: user.defined.label/type operator: Exists ``` + {{% alert title="Note" color="primary" %}} -There is currently a limit of 30 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 30 requirements and labels combined set on your NodePool. +There is currently a limit of 100 on the total number of requirements on both the NodePool and the NodeClaim. It's important to note that `spec.template.metadata.labels` are also propagated as requirements on the NodeClaim when it's created, meaning that you can't have more than 100 requirements and labels combined set on your NodePool. {{% /alert %}} #### Node selectors @@ -193,6 +196,16 @@ Then the pod can declare that custom label. See [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) in the Kubernetes documentation for details. +## Preferences + +Karpenter is aware of preferences (node affinity, pod affinity, pod anti-affinity, and pod topology) and treats them as requirements in most circumstances. Karpenter uses these preferences when determining if a pod can schedule on a node (absent topology requirements), or when determining if a pod can be shifted to a new node. + +Karpenter starts by treating preferred affinities as required affinities when constructing requirements for a pod. When these requirements cannot be met, the pod's preferences are relaxed one-at-a-time by ascending weight (lowest weight is relaxed first), and the remaining requirements are tried again. + +{{% alert title="Warning" color="warning" %}} +Karpenter does not interpret preferred affinities as required when constructing topology requirements for scheduling to a node. If these preferences are necessary, required affinities should be used [as documented in Node Affinity](#node-affinity). +{{% /alert %}} + ### Node affinity Examples below illustrate how to use Node affinity to include (`In`) and exclude (`NotIn`) objects. 
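As a minimal, self-contained sketch of the two operators (the pod name, image, zones, and label values here are illustrative, not taken from this guide), a pod can require particular zones and non-spot capacity while only preferring a compute-optimized instance category:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: affinity-example            # illustrative name
spec:
  affinity:
    nodeAffinity:
      # Hard rule: only these zones (In) and never spot capacity (NotIn).
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values: ["us-west-2a", "us-west-2b"]
              - key: karpenter.sh/capacity-type
                operator: NotIn
                values: ["spot"]
      # Soft rule: prefer compute-optimized instances.
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 1
          preference:
            matchExpressions:
              - key: karpenter.k8s.aws/instance-category
                operator: In
                values: ["c"]
  containers:
    - name: app
      image: nginx                  # placeholder image
```

Because the preference carries the lowest possible weight, it is the first constraint Karpenter relaxes if no instance type can satisfy it, per the relaxation order described in the Preferences section above.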
@@ -202,6 +215,10 @@ When setting rules, the following Node affinity types define how hard or soft ea * **requiredDuringSchedulingIgnoredDuringExecution**: This is a hard rule that must be met. * **preferredDuringSchedulingIgnoredDuringExecution**: This is a preference, but the pod can run on a node where it is not guaranteed. +{{% alert title="Note" color="primary" %}} +Preferred affinities on pods can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy preferences, [see the preferences documentation](#preferences) for details. +{{% /alert %}} + The `IgnoredDuringExecution` part of each tells the pod to keep running, even if conditions change on the node so the rules no longer matched. You can think of these concepts as `required` and `preferred`, since Kubernetes never implemented other variants of these rules. @@ -262,13 +279,13 @@ If they all fail, Karpenter will fail to provision the pod. Karpenter will backoff and retry over time. So if capacity becomes available, it will schedule the pod without user intervention. -## Taints and tolerations +### Taints and tolerations Taints are the opposite of affinity. Setting a taint on a node tells the scheduler to not run a pod on it unless the pod has explicitly said it can tolerate that taint. This example shows a NodePool that was set up with a taint for only running pods that require a GPU, such as the following: ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: gpu @@ -309,9 +326,14 @@ spec: ``` See [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) in the Kubernetes documentation for details. -## Topology Spread +### Topology Spread By using the Kubernetes `topologySpreadConstraints` you can ask the NodePool to have pods push away from each other to limit the blast radius of an outage. Think of it as the Kubernetes evolution for pod affinity: it lets you relate pods with respect to nodes while still allowing spread. + +{{% alert title="Note" color="primary" %}} +Preferred topology spread (`ScheduleAnyway`) can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy spread constraints, [see the preferences documentation](#preferences) for details. +{{% /alert %}} + For example: ```yaml @@ -356,9 +378,15 @@ See [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/worklo NodePools do not attempt to balance or rebalance the availability zones for their nodes. Availability zone balancing may be achieved by defining zonal Topology Spread Constraints for Pods that require multi-zone durability, and NodePools will respect these constraints while optimizing for compute costs. {{% /alert %}} -## Pod affinity/anti-affinity +### Pod affinity/anti-affinity + +By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the Karpenter scheduler of your desire for pods to schedule together or apart with respect to different topology domains. -By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the Karpenter scheduler of your desire for pods to schedule together or apart with respect to different topology domains. 
For example: +{{% alert title="Note" color="primary" %}} +Preferred affinities on pods can result in more nodes being created than expected because Karpenter will prefer to create new nodes to satisfy preferences, [see the preferences documentation](#preferences) for details. +{{% /alert %}} + +For example: ```yaml spec: @@ -386,7 +414,7 @@ The anti-affinity rule would cause it to avoid running on any node with a pod la See [Inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) in the Kubernetes documentation for details. -## Persistent Volume Topology +### Persistent Volume Topology Karpenter automatically detects storage scheduling requirements and includes them in node launch decisions. @@ -454,7 +482,7 @@ If you have purchased a [Savings Plan](https://aws.amazon.com/savingsplans/) or To enable this, you will need to tell the Karpenter controllers which instance types to prioritize and what is the maximum amount of capacity that should be provisioned using those instance types. We can set the `.spec.limits` field on the NodePool to limit the capacity that can be launched by this NodePool. Combined with the `.spec.weight` value, we can tell Karpenter to pull from instance types in the reserved NodePool before defaulting to generic instance types. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: reserved-instance @@ -469,7 +497,7 @@ spec: operator: In values: ["c4.large"] --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -492,7 +520,7 @@ Pods that do not specify node selectors or affinities can potentially be assigne By assigning a higher `.spec.weight` value and restricting a NodePool to a specific capacity type or architecture, we can set default configuration for the nodes launched by pods that don't have node configuration restrictions. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -508,7 +536,7 @@ spec: operator: In values: ["amd64"] --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: arm64-specific @@ -626,7 +654,7 @@ If using Gt/Lt operators, make sure to use values under the actual label values The `Exists` operator can be used on a NodePool to provide workload segregation across nodes. ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool spec: template: @@ -710,7 +738,7 @@ This is not identical to a topology spread with a specified ratio. 
We are const #### NodePools ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: spot @@ -729,7 +757,7 @@ spec: - "4" - "5" --- -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: on-demand diff --git a/website/content/en/v0.35/contributing/_index.md b/website/content/en/v1.0/contributing/_index.md similarity index 100% rename from website/content/en/v0.35/contributing/_index.md rename to website/content/en/v1.0/contributing/_index.md diff --git a/website/content/en/v0.35/contributing/design-guide.md b/website/content/en/v1.0/contributing/design-guide.md similarity index 100% rename from website/content/en/v0.35/contributing/design-guide.md rename to website/content/en/v1.0/contributing/design-guide.md diff --git a/website/content/en/v0.35/contributing/development-guide.md b/website/content/en/v1.0/contributing/development-guide.md similarity index 93% rename from website/content/en/v0.35/contributing/development-guide.md rename to website/content/en/v1.0/contributing/development-guide.md index 93796441ea98..02632af9222e 100644 --- a/website/content/en/v0.35/contributing/development-guide.md +++ b/website/content/en/v1.0/contributing/development-guide.md @@ -84,13 +84,13 @@ By default, `make apply` will set the log level to debug. You can change the log OSX: ```bash -open http://localhost:8000/metrics && kubectl port-forward service/karpenter -n karpenter 8000 +open http://localhost:8080/metrics && kubectl port-forward service/karpenter -n kube-system 8080 ``` Linux: ```bash -gio open http://localhost:8000/metrics && kubectl port-forward service/karpenter -n karpenter 8000 +gio open http://localhost:8080/metrics && kubectl port-forward service/karpenter -n karpenter 8080 ``` ### Tailing Logs @@ -143,8 +143,8 @@ go install github.com/google/pprof@latest ### Get a profile ``` # Connect to the metrics endpoint -kubectl port-forward service/karpenter -n karpenter 8000 -open http://localhost:8000/debug/pprof/ +kubectl port-forward service/karpenter -n karpenter 8080 +open http://localhost:8080/debug/pprof/ # Visualize the memory -go tool pprof -http 0.0.0.0:9000 localhost:8000/debug/pprof/heap +go tool pprof -http 0.0.0.0:9000 localhost:8080/debug/pprof/heap ``` diff --git a/website/content/en/v1.0/contributing/documentation-updates.md b/website/content/en/v1.0/contributing/documentation-updates.md new file mode 100644 index 000000000000..0eb9db1e10b3 --- /dev/null +++ b/website/content/en/v1.0/contributing/documentation-updates.md @@ -0,0 +1,11 @@ +--- +title: "Documentation Updates" +linkTitle: "Documentation Updates" +weight: 50 +description: > + Information helpful for contributing simple documentation updates. +--- + +- Documentation for https://karpenter.sh/docs/ is built under website/content/en/preview/. +- Documentation updates should be made to the "preview" directory. Your changes will be promoted to website/content/en/docs/ by an automated process after the change has been merged. +- Previews for your changes are built and available a few minutes after you push. Look for the "netlify Deploy Preview" link in a comment in your PR. 
diff --git a/website/content/en/v0.35/contributing/working-group.md b/website/content/en/v1.0/contributing/working-group.md similarity index 100% rename from website/content/en/v0.35/contributing/working-group.md rename to website/content/en/v1.0/contributing/working-group.md diff --git a/website/content/en/v0.35/faq.md b/website/content/en/v1.0/faq.md similarity index 91% rename from website/content/en/v0.35/faq.md rename to website/content/en/v1.0/faq.md index e55b519a50e1..2318827a4dfe 100644 --- a/website/content/en/v0.35/faq.md +++ b/website/content/en/v1.0/faq.md @@ -7,6 +7,9 @@ description: > --- ## General +### Is Karpenter safe for production use? +Karpenter v1 is the first stable Karpenter API. Any future incompatible API changes will require a v2 version. + ### How does a NodePool decide to manage a particular node? See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for information on how Karpenter configures and manages nodes. @@ -14,7 +17,7 @@ See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for AWS is the first cloud provider supported by Karpenter, although it is designed to be used with other cloud providers as well. ### Can I write my own cloud provider for Karpenter? -Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.35.5/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. +Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v1.0.0/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. ### What operating system nodes does Karpenter deploy? Karpenter uses the OS defined by the [AMI Family in your EC2NodeClass]({{< ref "./concepts/nodeclasses#specamifamily" >}}). @@ -26,7 +29,7 @@ Karpenter has multiple mechanisms for configuring the [operating system]({{< ref Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). ### What RBAC access is required? -All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/role.yaml) files for details. +All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/role.yaml) files for details. ### Can I run Karpenter outside of a Kubernetes cluster? Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API. 
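To make the multi-architecture answer above concrete, a hedged sketch of a NodePool requirement using the well-known architecture and OS labels might look like the following (the values are only examples, and the rest of the NodePool is elided in the same style as the snippets in this FAQ):

```yaml
apiVersion: karpenter.sh/v1
kind: NodePool
...
spec:
  template:
    spec:
      requirements:
        # Allow Karpenter to choose either architecture; individual pods can
        # still pin one via a kubernetes.io/arch nodeSelector or nodeAffinity.
        - key: kubernetes.io/arch
          operator: In
          values: ["amd64", "arm64"]
        - key: kubernetes.io/os
          operator: In
          values: ["linux"]
```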
@@ -119,7 +122,7 @@ Karpenter has a concept of an “offering” for each instance type, which is a Yes! Karpenter dynamically discovers if you are running in an IPv6 cluster by checking the kube-dns service's cluster-ip. When using an AMI Family such as `AL2`, Karpenter will automatically configure the EKS Bootstrap script for IPv6. Some EC2 instance types do not support IPv6 and the Amazon VPC CNI only supports instance types that run on the Nitro hypervisor. It's best to add a requirement to your NodePool to only allow Nitro instance types: ``` -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool ... spec: @@ -138,6 +141,25 @@ For more documentation on enabling IPv6 with the Amazon VPC CNI, see the [docs]( Windows nodes do not support IPv6. {{% /alert %}} +### Why do I see extra nodes get launched to schedule pending pods that remain empty and are later removed? + +You might have a daemonset, userData configuration, or some other workload that applies a taint after a node is provisioned. After the taint is applied, Karpenter will detect that the pod cannot be scheduled to this new node due to the added taint. As a result, Karpenter will provision yet another node. Typically, the original node has the taint removed and the pod schedules to it, leaving the extra new node unused and reaped by emptiness/consolidation. If the taint is not removed quickly enough, Karpenter may remove the original node before the pod can be scheduled via emptiness consolidation. This could result in an infinite loop of nodes being provisioned and consolidated without the pending pod ever scheduling. + +The solution is to configure [startupTaints]({{}}) to make Karpenter aware of any temporary taints that are needed to ensure that pods do not schedule on nodes that are not yet ready to receive them. + +Here's an example for Cilium's startup taint. +``` +apiVersion: karpenter.sh/v1 +kind: NodePool +... +spec: + template: + spec: + startupTaints: + - key: node.cilium.io/agent-not-ready + effect: NoSchedule +``` + ## Scheduling ### When using preferred scheduling constraints, Karpenter launches the correct number of nodes at first. Why do they then sometimes get consolidated immediately? @@ -179,10 +201,10 @@ Yes, see the [KubeletConfiguration Section in the NodePool docs]({{ To get started with Karpenter, the [Getting Started with Karpenter]({{< relref "getting-started-with-karpenter" >}}) guide provides an end-to-end procedure for creating a cluster (with `eksctl`) and adding Karpenter. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. + If you prefer, the following instructions use Terraform to create a cluster and add Karpenter: * [Amazon EKS Blueprints for Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints): Follow a basic [Getting Started](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/) guide and also add modules and add-ons. This includes a [Karpenter](https://aws-ia.github.io/terraform-aws-eks-blueprints/patterns/karpenter/) add-on that lets you bypass the instructions in this guide for setting up Karpenter. 
diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/_index.md similarity index 90% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/_index.md index 41a165b7e414..14879bbe720e 100644 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/_index.md @@ -11,7 +11,10 @@ Karpenter automatically provisions new nodes in response to unschedulable pods. This guide shows how to get started with Karpenter by creating a Kubernetes cluster and installing Karpenter. To use Karpenter, you must be running a supported Kubernetes cluster on a supported cloud provider. -Currently, only EKS on AWS is supported. + +The guide below explains how to utilize the [Karpenter provider for AWS](https://github.com/aws/karpenter-provider-aws) with EKS. + +See the [AKS Node autoprovisioning article](https://learn.microsoft.com/azure/aks/node-autoprovision) on how to use Karpenter on Azure's AKS or go to the [Karpenter provider for Azure open source repository](https://github.com/Azure/karpenter-provider-azure) for self-hosting on Azure and additional information. ## Create a cluster and add Karpenter @@ -32,7 +35,7 @@ Install these tools before proceeding: 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html) 2. `kubectl` - [the Kubernetes CLI](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) -3. `eksctl` (>= v0.169.0) - [the CLI for AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) +3. `eksctl` (>= v0.180.0) - [the CLI for AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) 4. `helm` - [the package manager for Kubernetes](https://helm.sh/docs/intro/install/) [Configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) @@ -45,8 +48,8 @@ After setting up the tools, set the Karpenter and Kubernetes version: ```bash export KARPENTER_NAMESPACE="kube-system" -export KARPENTER_VERSION="0.35.5" -export K8S_VERSION="1.29" +export KARPENTER_VERSION="1.0.0" +export K8S_VERSION="1.30" ``` Then set the following environment variable: @@ -109,8 +112,20 @@ See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/ {{% /tab %}} {{< /tabpane >}} +As the OCI Helm chart is signed by [Cosign](https://github.com/sigstore/cosign) as part of the release process you can verify the chart before installing it by running the following command. + +```bash +cosign verify public.ecr.aws/karpenter/karpenter:1.0.0 \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + --certificate-identity-regexp='https://github\.com/aws/karpenter-provider-aws/\.github/workflows/release\.yaml@.+' \ + --certificate-github-workflow-repository=aws/karpenter-provider-aws \ + --certificate-github-workflow-name=Release \ + --certificate-github-workflow-ref=refs/tags/v1.0.0 \ + --annotations version=1.0.0 +``` + {{% alert title="DNS Policy Notice" color="warning" %}} -Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpetner can reach-out to internal Kubernetes services during its lifetime. 
There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default and this ensures that Karpenter can reach-out to internal Kubernetes services during its lifetime. There may be cases where you do not have the DNS service that you are using on your cluster up-and-running before Karpenter starts up. The most common case of this is you want Karpenter to manage the node capacity where your DNS service pods are running. If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts-up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). {{% /alert %}} @@ -139,7 +154,7 @@ A single Karpenter NodePool is capable of handling many different pod shapes. Ka Create a default NodePool using the command below. This NodePool uses `securityGroupSelectorTerms` and `subnetSelectorTerms` to discover resources used to launch nodes. We applied the tag `karpenter.sh/discovery` in the `eksctl` command above. Depending on how these resources are shared between clusters, you may need to use different tagging schemes. -The `consolidationPolicy` set to `WhenUnderutilized` in the `disruption` block configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster. This behavior can be disabled by setting `consolidateAfter` to `Never`, telling Karpenter that it should never consolidate nodes. Review the [NodePool API docs]({{}}) for more information. +The `consolidationPolicy` set to `WhenEmptyOrUnderutilized` in the `disruption` block configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster. This behavior can be disabled by setting `consolidateAfter` to `Never`, telling Karpenter that it should never consolidate nodes. Review the [NodePool API docs]({{}}) for more information. Note: This NodePool will create capacity as long as the sum of all created capacity is less than the specified limit. 
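For orientation, the following is a hedged sketch of the kind of NodePool and EC2NodeClass the command below creates. The names, requirements, AMI alias, and CPU limit are illustrative; the command itself is authoritative and may differ in its details.

```yaml
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      requirements:
        - key: karpenter.sh/capacity-type
          operator: In
          values: ["on-demand"]
      nodeClassRef:
        group: karpenter.k8s.aws
        kind: EC2NodeClass
        name: default
      expireAfter: 720h                 # nodes are drained and replaced after 30 days
  disruption:
    consolidationPolicy: WhenEmptyOrUnderutilized
    consolidateAfter: 1m
  limits:
    cpu: 1000                           # stop provisioning once 1000 vCPUs are in use
---
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
  name: default
spec:
  role: "KarpenterNodeRole-${CLUSTER_NAME}"   # role created earlier in this guide
  amiSelectorTerms:
    - alias: al2023@latest                    # illustrative AMI alias
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
```

Treat this only as a reading aid for the fields called out above (`consolidationPolicy`, `consolidateAfter`, `spec.limits`, and the `karpenter.sh/discovery` selector terms); apply the manifest from the guide's command rather than this sketch.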
diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml similarity index 96% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml index 1878cd6d352a..567808be5830 100644 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml @@ -83,7 +83,8 @@ Resources: ], "Condition": { "StringEquals": { - "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" }, "StringLike": { "aws:RequestTag/karpenter.sh/nodepool": "*" @@ -105,6 +106,7 @@ Resources: "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "ec2:CreateAction": [ "RunInstances", "CreateFleet", @@ -128,8 +130,12 @@ Resources: "StringLike": { "aws:ResourceTag/karpenter.sh/nodepool": "*" }, + "StringEqualsIfExists": { + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" + }, "ForAllValues:StringEquals": { "aws:TagKeys": [ + "eks:eks-cluster-name", "karpenter.sh/nodeclaim", "Name" ] @@ -220,6 +226,7 @@ Resources: "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -239,6 +246,7 @@ Resources: "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/grafana-values.yaml similarity index 68% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/grafana-values.yaml index 67d28f71217c..0b301d9a20ea 100644 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/grafana-values.yaml @@ -22,6 +22,6 @@ dashboardProviders: dashboards: default: capacity-dashboard: - url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json + url: https://karpenter.sh/v1.0/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json performance-dashboard: - url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json + url: https://karpenter.sh/v1.0/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json 
b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json similarity index 93% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json index 7f93053b3206..e85e582de299 100644 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json @@ -115,7 +115,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(cluster,nodepool) (karpenter_nodes_created{nodepool=~\"$nodepool\"})", + "expr": "sum by(cluster,nodepool) (karpenter_nodes_created_total{nodepool=~\"$nodepool\"})", "format": "time_series", "legendFormat": "{{cluster}}", "range": true, @@ -215,7 +215,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(cluster,nodepool) (karpenter_nodes_terminated{nodepool=~\"$nodepool\"})", + "expr": "sum by(cluster,nodepool) (karpenter_nodes_terminated_total{nodepool=~\"$nodepool\"})", "format": "time_series", "legendFormat": "{{cluster}}", "range": true, @@ -408,7 +408,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_actions_performed_total)", + "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_decisions_total)", "legendFormat": "{{label_name}}", "range": true, "refId": "A" @@ -417,102 +417,6 @@ "title": "Disruption Actions Performed", "type": "timeseries" }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "See: https://karpenter.sh/v0.35/concepts/disruption/#automated-methods", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 22 - }, - "id": 17, - "options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum by(action,consolidation_type,method)(karpenter_disruption_nodes_disrupted_total{nodepool=~\"$nodepool\"})", - "legendFormat": "{{label_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Voluntary Node Disruptions: nodepool \"$nodepool\"", - "type": "timeseries" - }, { "datasource": { "type": "prometheus", @@ -1609,7 +1513,7 @@ "type": "prometheus", "uid": "prometheus" }, - "definition": 
"label_values(karpenter_disruption_actions_performed_total,method)", + "definition": "label_values(karpenter_disruption_decisions_total,method)", "hide": 0, "includeAll": true, "multi": true, @@ -1617,7 +1521,7 @@ "options": [], "query": { "qryType": 1, - "query": "label_values(karpenter_disruption_actions_performed_total,method)", + "query": "label_values(karpenter_disruption_decisions_total,method)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers.json b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-controllers.json similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers.json rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-controllers.json diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/prometheus-values.yaml b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/prometheus-values.yaml similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/prometheus-values.yaml rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/prometheus-values.yaml diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step01-config.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step01-config.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step01-config.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step01-config.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh similarity index 91% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh index fa577d724e9f..07e9e5add716 100755 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh +++ 
b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh similarity index 92% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh index 59c32bcb7a0c..47c290b4fdb3 100755 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh similarity index 66% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh index 54e826db269b..169e5f1902bc 100755 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh @@ -1,6 +1,6 @@ TEMPOUT="$(mktemp)" -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v1.0.0/website/content/en/v1.0/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh rename to 
website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh similarity index 100% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh rename to 
website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh similarity index 87% rename from website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh rename to website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh index 33f1cb553b1b..85213a3457c3 100755 --- a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh +++ b/website/content/en/v1.0/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh @@ -1,5 +1,5 @@ cat < cloudformation.yaml ``` @@ -162,7 +162,7 @@ For `RunInstances` and `CreateFleet` actions, the Karpenter controller can read The AllowScopedEC2InstanceActionsWithTags Sid allows the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html), and [CreateLaunchTemplate](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) -actions requested by the Karpenter controller to create all `fleet`, `instance`, `volume`, `network-interface`, `launch-template` or `spot-instances-request` EC2 resources (for the partition and region), and requires that the `kubernetes.io/cluster/${ClusterName}` tag be set to `owned` and a `karpenter.sh/nodepool` tag be set to any value. This ensures that Karpenter is only allowed to create instances for a single EKS cluster. +actions requested by the Karpenter controller to create all `fleet`, `instance`, `volume`, `network-interface`, `launch-template` or `spot-instances-request` EC2 resources (for the partition and region). It also requires that the `kubernetes.io/cluster/${ClusterName}` tag be set to `owned`, `aws:RequestTag/eks:eks-cluster-name` be set to `"${ClusterName}`, and a `karpenter.sh/nodepool` tag be set to any value. This ensures that Karpenter is only allowed to create instances for a single EKS cluster. ```json { @@ -184,6 +184,7 @@ actions requested by the Karpenter controller to create all `fleet`, `instance`, "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" }, "StringLike": { "aws:RequestTag/karpenter.sh/nodepool": "*" @@ -196,6 +197,7 @@ actions requested by the Karpenter controller to create all `fleet`, `instance`, The AllowScopedResourceCreationTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` and `spot-instances-request` resources, While making `RunInstance`, `CreateFleet`, or `CreateLaunchTemplate` calls. Additionally, this ensures that resources can't be tagged arbitrarily by Karpenter after they are created. +Conditions that must be met include that `aws:RequestTag/kubernetes.io/cluster/${ClusterName}` be set to `owned` and `aws:RequestTag/eks:eks-cluster-name` be set to `${ClusterName}`. 
```json { @@ -213,6 +215,7 @@ actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" "ec2:CreateAction": [ "RunInstances", "CreateFleet", @@ -229,6 +232,7 @@ actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` #### AllowScopedResourceTagging The AllowScopedResourceTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) actions on all instances created by Karpenter after their creation. It enforces that Karpenter is only able to update the tags on cluster instances it is operating on through the `kubernetes.io/cluster/${ClusterName}`" and `karpenter.sh/nodepool` tags. +Likewise, `RequestTag/eks:eks-cluster-name` must be set to `${ClusterName}`, if it exists, and `TagKeys` must equal `eks:eks-cluster-name`, `karpenter.sh/nodeclaim`, and `Name`, for all values. ```json { "Sid": "AllowScopedResourceTagging", @@ -242,8 +246,12 @@ The AllowScopedResourceTagging Sid allows EC2 [CreateTags](https://docs.aws.amaz "StringLike": { "aws:ResourceTag/karpenter.sh/nodepool": "*" }, + "StringEqualsIfExists": { + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}" + }, "ForAllValues:StringEquals": { "aws:TagKeys": [ + "eks:eks-cluster-name", "karpenter.sh/nodeclaim", "Name" ] @@ -310,7 +318,7 @@ This allows the Karpenter controller to do any of those read-only actions across #### AllowSSMReadActions -The AllowSSMReadActions Sid allows the Karpenter controller to read SSM parameters (`ssm:GetParameter`) from the current region for SSM parameters generated by ASW services. +The AllowSSMReadActions Sid allows the Karpenter controller to get SSM parameters (`ssm:GetParameter`) from the current region for SSM parameters generated by AWS services. **NOTE**: If potentially sensitive information is stored in SSM parameters, you could consider restricting access to these messages further. ```json @@ -376,7 +384,7 @@ This gives EC2 permission explicit permission to use the `KarpenterNodeRole-${Cl #### AllowScopedInstanceProfileCreationActions The AllowScopedInstanceProfileCreationActions Sid gives the Karpenter controller permission to create a new instance profile with [`iam:CreateInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateInstanceProfile.html), -provided that the request is made to a cluster with `kubernetes.io/cluster/${ClusterName}` set to owned and is made in the current region. +provided that the request is made to a cluster with `RequestTag` `kubernetes.io/cluster/${ClusterName}` set to `owned`, the `eks:eks-cluster-name` set to `${ClusterName}`, and `topology.kubernetes.io/region` set to the current region. Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter can generate instance profiles on your behalf based on roles specified in your `EC2NodeClasses` that you use to configure Karpenter. ```json @@ -390,6 +398,7 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures t "Condition": { "StringEquals": { "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -401,8 +410,8 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. 
This ensures t #### AllowScopedInstanceProfileTagActions -The AllowScopedInstanceProfileTagActions Sid gives the Karpenter controller permission to tag an instance profile with [`iam:TagInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_TagInstanceProfile.html), based on the values shown below, -Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter is only able to act on instance profiles that it provisions for this cluster. +The AllowScopedInstanceProfileTagActions Sid gives the Karpenter controller permission to tag an instance profile with [`iam:TagInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_TagInstanceProfile.html), provided that `ResourceTag` attributes `kubernetes.io/cluster/${ClusterName}` is set to `owned` and `topology.kubernetes.io/region` is set to the current region and `RequestTag` attributes `kubernetes.io/cluster/${ClusterName}` is set to `owned`, `eks:eks-cluster-name` is set to `${ClusterName}`, and `topology.kubernetes.io/region` is set to the current region. +Also, `ResourceTag/karpenter.k8s.aws/ec2nodeclass` and `RequestTag/karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter is only able to act on instance profiles that it provisions for this cluster. ```json { @@ -417,6 +426,7 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures t "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/eks:eks-cluster-name": "${ClusterName}", "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" }, "StringLike": { @@ -456,15 +466,15 @@ Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This permissio } ``` -#### AllowInstanceProfileActions +#### AllowInstanceProfileReadActions -The AllowInstanceProfileActions Sid gives the Karpenter controller permission to perform [`iam:GetInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetInstanceProfile.html) actions to retrieve information about a specified instance profile, including understanding if an instance profile has been provisioned for an `EC2NodeClass` or needs to be re-provisioned. +The AllowInstanceProfileReadActions Sid gives the Karpenter controller permission to perform [`iam:GetInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetInstanceProfile.html) actions to retrieve information about a specified instance profile, including understanding if an instance profile has been provisioned for an `EC2NodeClass` or needs to be re-provisioned. 
```json { "Sid": "AllowInstanceProfileReadActions", "Effect": "Allow", - "Resource": "arn:aws:iam::${AWS::AccountId}:instance-profile/*", + "Resource": "arn:${AWS::Partition}:iam::${AWS::AccountId}:instance-profile/*", "Action": "iam:GetInstanceProfile" } ``` diff --git a/website/content/en/v0.35/reference/instance-types.md b/website/content/en/v1.0/reference/instance-types.md similarity index 86% rename from website/content/en/v0.35/reference/instance-types.md rename to website/content/en/v1.0/reference/instance-types.md index f93fc2c75a81..29dd6b22a6e4 100644 --- a/website/content/en/v0.35/reference/instance-types.md +++ b/website/content/en/v1.0/reference/instance-types.md @@ -18,6 +18,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -42,6 +44,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -66,6 +70,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -90,6 +96,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -114,6 +122,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -138,6 +148,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|a| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|a1| |karpenter.k8s.aws/instance-generation|1| @@ -163,6 +175,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c1| |karpenter.k8s.aws/instance-generation|1| @@ -185,6 +198,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c1| |karpenter.k8s.aws/instance-generation|1| @@ -208,6 +222,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c3| |karpenter.k8s.aws/instance-generation|3| @@ -230,6 +245,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c3| |karpenter.k8s.aws/instance-generation|3| @@ -252,6 +268,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c3| |karpenter.k8s.aws/instance-generation|3| @@ -274,6 +291,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c3| |karpenter.k8s.aws/instance-generation|3| @@ -296,6 +314,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c3| |karpenter.k8s.aws/instance-generation|3| @@ -320,6 +339,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c4| |karpenter.k8s.aws/instance-generation|4| @@ -342,6 +363,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c4| |karpenter.k8s.aws/instance-generation|4| @@ -364,6 +387,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c4| |karpenter.k8s.aws/instance-generation|4| @@ -386,6 +411,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c4| |karpenter.k8s.aws/instance-generation|4| @@ -408,6 +435,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|36| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c4| |karpenter.k8s.aws/instance-generation|4| @@ -432,6 +461,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -456,6 +487,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -480,6 +513,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -504,6 +539,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -528,6 +565,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|36| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -552,6 +591,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -576,6 +617,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|72| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -600,6 +643,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -624,6 +669,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5| |karpenter.k8s.aws/instance-generation|5| @@ -649,6 +696,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -673,6 +722,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -697,6 +748,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -721,6 +774,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -745,6 +800,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -769,6 +826,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -793,6 +852,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6300| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -817,6 +878,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5a| |karpenter.k8s.aws/instance-generation|5| @@ -842,6 +905,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -867,6 +932,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -892,6 +959,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -917,6 +986,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -942,6 +1013,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -967,6 +1040,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -992,6 +1067,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6300| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -1017,6 +1094,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5ad| |karpenter.k8s.aws/instance-generation|5| @@ -1043,6 +1122,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1068,6 +1149,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1093,6 +1176,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1118,6 +1203,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1143,6 +1230,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|36| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| 
|karpenter.k8s.aws/instance-generation|5| @@ -1168,6 +1257,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1193,6 +1284,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|72| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1218,6 +1311,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1243,6 +1338,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c5d| |karpenter.k8s.aws/instance-generation|5| @@ -1269,6 +1366,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1293,6 +1392,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1317,6 +1418,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1341,6 +1444,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| 
|karpenter.k8s.aws/instance-generation|5| @@ -1365,6 +1470,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|36| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1390,6 +1497,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|72| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1415,6 +1524,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|72| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c5n| |karpenter.k8s.aws/instance-generation|5| @@ -1441,6 +1552,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1465,6 +1578,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1489,6 +1604,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1513,6 +1630,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1537,6 +1656,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ 
-1561,6 +1682,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1585,6 +1708,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1609,6 +1734,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1633,6 +1760,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1657,6 +1786,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1682,6 +1813,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6a| |karpenter.k8s.aws/instance-generation|6| @@ -1708,6 +1841,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1732,6 +1867,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1756,6 +1893,8 @@ below are the resources 
available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1780,6 +1919,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1804,6 +1945,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1828,6 +1971,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1852,6 +1997,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1876,6 +2023,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1900,6 +2049,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6g| |karpenter.k8s.aws/instance-generation|6| @@ -1925,6 +2076,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -1950,6 +2103,8 @@ below are the resources available with some assumptions and after the 
instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -1975,6 +2130,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2000,6 +2157,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2025,6 +2184,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2050,6 +2211,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2075,6 +2238,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2100,6 +2265,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2125,6 +2292,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|c6gd| |karpenter.k8s.aws/instance-generation|6| @@ -2151,6 +2320,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2175,6 +2346,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2199,6 +2372,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2223,6 +2398,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2247,6 +2424,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2271,6 +2450,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2295,6 +2476,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|28500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2319,6 +2502,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6gn| |karpenter.k8s.aws/instance-generation|6| @@ -2345,6 +2530,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| 
|karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2369,6 +2556,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2393,6 +2582,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2417,6 +2608,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2441,6 +2634,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2465,6 +2660,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2489,6 +2686,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2513,6 +2712,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2537,6 +2738,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| 
+ |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2562,6 +2765,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6i| |karpenter.k8s.aws/instance-generation|6| @@ -2588,6 +2793,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2613,6 +2820,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2638,6 +2847,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2663,6 +2874,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2688,6 +2901,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2713,6 +2928,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2738,6 +2955,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2763,6 +2982,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2788,6 +3009,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2814,6 +3037,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6id| |karpenter.k8s.aws/instance-generation|6| @@ -2841,6 +3066,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2865,6 +3092,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2889,6 +3118,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2913,6 +3144,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2937,6 +3170,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2961,6 +3196,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|37500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -2985,6 +3222,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|50000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -3009,6 +3248,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|75000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -3033,6 +3274,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -3051,13 +3294,15 @@ below are the resources available with some assumptions and after the instance o |memory|237794Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ### `c6in.metal` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c6in| |karpenter.k8s.aws/instance-generation|6| @@ -3076,7 +3321,7 @@ below are the resources available with some assumptions and after the instance o |memory|237794Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ## c7a Family ### `c7a.medium` #### Labels @@ -3084,6 +3329,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3108,6 +3355,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3132,6 +3381,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3156,6 +3407,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3180,6 +3433,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3204,6 +3459,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3228,6 +3485,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3252,6 +3511,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3276,6 +3537,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3300,6 +3563,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3324,6 +3589,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3349,6 +3616,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7a| |karpenter.k8s.aws/instance-generation|7| @@ -3375,6 +3644,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3399,6 +3670,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3423,6 +3696,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3447,6 +3722,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3471,6 +3748,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3495,6 +3774,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3519,6 +3800,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3543,6 +3826,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3568,6 +3853,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7g| |karpenter.k8s.aws/instance-generation|7| @@ -3594,6 +3881,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3619,6 +3908,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3644,6 +3935,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3669,6 +3962,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3694,6 +3989,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3719,6 +4016,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3744,6 +4043,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3769,6 +4070,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| @@ -3795,12 +4098,15 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gd| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|30000| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -3813,6 +4119,7 @@ below are the resources available with some assumptions and after the instance o |memory|112720Mi| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## c7gn Family ### `c7gn.medium` #### Labels @@ -3820,6 +4127,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3844,6 +4153,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3868,6 +4179,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3892,6 +4205,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3916,6 +4231,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3940,6 +4257,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3964,6 +4283,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -3988,6 +4309,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| @@ -4013,11 +4336,14 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7gn| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|200000| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -4030,6 +4356,7 @@ below are the resources available with some assumptions and after the instance o |memory|112720Mi| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## c7i Family ### `c7i.large` #### Labels @@ -4037,6 +4364,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4061,6 +4390,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4085,6 +4416,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4109,6 +4442,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4133,6 +4468,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4157,6 +4494,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4181,6 +4520,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4205,6 +4546,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4229,6 +4572,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4253,6 +4598,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4278,6 +4625,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|c| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|c7i| |karpenter.k8s.aws/instance-generation|7| @@ -4304,6 +4653,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|d2| |karpenter.k8s.aws/instance-generation|2| @@ -4326,6 +4677,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|d2| |karpenter.k8s.aws/instance-generation|2| @@ -4348,6 +4701,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|d2| |karpenter.k8s.aws/instance-generation|2| @@ -4370,6 +4725,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|36| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|d2| |karpenter.k8s.aws/instance-generation|2| @@ -4394,6 +4751,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3| |karpenter.k8s.aws/instance-generation|3| @@ -4419,6 +4778,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3| |karpenter.k8s.aws/instance-generation|3| @@ -4444,6 +4805,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3| |karpenter.k8s.aws/instance-generation|3| @@ -4469,6 +4832,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|5000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3| |karpenter.k8s.aws/instance-generation|3| @@ -4495,6 +4860,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4520,6 +4887,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4545,6 +4914,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4570,6 +4941,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4595,6 +4968,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|5000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4620,6 +4995,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|d| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|d3en| |karpenter.k8s.aws/instance-generation|3| @@ -4646,6 +5023,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|dl| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|dl1| |karpenter.k8s.aws/instance-generation|1| @@ -4678,13 +5057,14 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|f| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1700| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|f1| |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-local-nvme|470| |karpenter.k8s.aws/instance-memory|124928| - |karpenter.k8s.aws/instance-network-bandwidth|2500| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4702,13 +5082,14 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|f| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|f1| |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-local-nvme|940| |karpenter.k8s.aws/instance-memory|249856| - |karpenter.k8s.aws/instance-network-bandwidth|5000| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4726,6 +5107,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|f| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|f1| |karpenter.k8s.aws/instance-generation|1| @@ -4751,6 +5134,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g3| |karpenter.k8s.aws/instance-generation|3| @@ -4760,7 +5145,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-name|m60| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| - |karpenter.k8s.aws/instance-network-bandwidth|5000| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4773,12 +5157,15 @@ below are the resources available with some assumptions and after the instance o |memory|112629Mi| |nvidia.com/gpu|1| |pods|234| + |vpc.amazonaws.com/pod-eni|6| ### `g3.8xlarge` #### Labels | 
Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g3| |karpenter.k8s.aws/instance-generation|3| @@ -4801,12 +5188,15 @@ below are the resources available with some assumptions and after the instance o |memory|228187Mi| |nvidia.com/gpu|2| |pods|234| + |vpc.amazonaws.com/pod-eni|6| ### `g3.16xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g3| |karpenter.k8s.aws/instance-generation|3| @@ -4836,6 +5226,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|850| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g3s| |karpenter.k8s.aws/instance-generation|3| @@ -4857,6 +5249,7 @@ below are the resources available with some assumptions and after the instance o |memory|27896Mi| |nvidia.com/gpu|1| |pods|58| + |vpc.amazonaws.com/pod-eni|10| ## g4ad Family ### `g4ad.xlarge` #### Labels @@ -4864,6 +5257,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4ad| |karpenter.k8s.aws/instance-generation|4| @@ -4894,6 +5289,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4ad| |karpenter.k8s.aws/instance-generation|4| @@ -4924,6 +5321,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4ad| |karpenter.k8s.aws/instance-generation|4| @@ -4954,6 +5353,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4ad| |karpenter.k8s.aws/instance-generation|4| @@ -4984,6 +5385,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6300| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4ad| |karpenter.k8s.aws/instance-generation|4| @@ -5015,6 +5418,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5045,6 +5450,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5075,6 +5482,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5105,6 +5514,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5136,6 +5547,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5167,6 +5580,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5198,6 +5613,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g4dn| |karpenter.k8s.aws/instance-generation|4| @@ -5230,6 +5647,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5260,6 +5679,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5290,6 +5711,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5320,6 +5743,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|16000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5351,6 +5776,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|16000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5382,6 +5809,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|16000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5413,6 +5842,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5444,6 +5875,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g5| |karpenter.k8s.aws/instance-generation|5| @@ -5476,6 +5909,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5505,6 +5940,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5534,6 +5971,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5563,6 +6002,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5592,6 +6033,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5621,6 +6064,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|g5g| |karpenter.k8s.aws/instance-generation|5| @@ -5651,6 +6096,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|5000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5661,6 +6108,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|250| |karpenter.k8s.aws/instance-memory|16384| + |karpenter.k8s.aws/instance-network-bandwidth|2500| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5673,12 +6121,15 @@ below are the resources available with some assumptions and after the instance o |memory|14162Mi| |nvidia.com/gpu|1| |pods|58| + |vpc.amazonaws.com/pod-eni|18| ### `g6.2xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|8| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|5000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5689,6 +6140,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|450| |karpenter.k8s.aws/instance-memory|32768| + |karpenter.k8s.aws/instance-network-bandwidth|5000| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5701,12 +6153,15 @@ below are the resources available with some assumptions and after the instance o |memory|29317Mi| |nvidia.com/gpu|1| |pods|58| + |vpc.amazonaws.com/pod-eni|38| ### `g6.4xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|8000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5717,6 +6172,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| + |karpenter.k8s.aws/instance-network-bandwidth|10000| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5729,12 +6185,15 @@ below are the resources available with some assumptions and after the instance o |memory|57691Mi| |nvidia.com/gpu|1| |pods|234| + |vpc.amazonaws.com/pod-eni|54| ### `g6.8xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|16000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5745,6 +6204,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|25000| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5758,12 +6218,15 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|1| |pods|234| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|84| ### `g6.12xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5772,8 +6235,9 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|91553| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|15200| + |karpenter.k8s.aws/instance-local-nvme|3760| |karpenter.k8s.aws/instance-memory|196608| + |karpenter.k8s.aws/instance-network-bandwidth|40000| 
|karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5787,12 +6251,15 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|4| |pods|234| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|114| ### `g6.16xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5801,8 +6268,9 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|22888| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|3800| + |karpenter.k8s.aws/instance-local-nvme|1880| |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|25000| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5816,12 +6284,15 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|1| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ### `g6.24xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5830,8 +6301,9 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|91553| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|15200| + |karpenter.k8s.aws/instance-local-nvme|3760| |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|50000| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5845,12 +6317,15 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|4| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ### `g6.48xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|g| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|g6| |karpenter.k8s.aws/instance-generation|6| @@ -5859,8 +6334,9 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-memory|183105| |karpenter.k8s.aws/instance-gpu-name|l4| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-local-nvme|60800| + |karpenter.k8s.aws/instance-local-nvme|7520| |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|100000| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5874,6 +6350,7 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|8| |pods|737| 
|vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## gr6 Family ### `gr6.4xlarge` #### Labels @@ -5881,6 +6358,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|gr| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|8000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|gr6| |karpenter.k8s.aws/instance-generation|6| @@ -5891,6 +6370,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|10000| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5903,12 +6383,15 @@ below are the resources available with some assumptions and after the instance o |memory|118312Mi| |nvidia.com/gpu|1| |pods|234| + |vpc.amazonaws.com/pod-eni|54| ### `gr6.8xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|gr| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|16000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|gr6| |karpenter.k8s.aws/instance-generation|6| @@ -5919,6 +6402,7 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|25000| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5932,6 +6416,7 @@ below are the resources available with some assumptions and after the instance o |nvidia.com/gpu|1| |pods|234| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|84| ## h1 Family ### `h1.2xlarge` #### Labels @@ -5939,6 +6424,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|h| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|h1| |karpenter.k8s.aws/instance-generation|1| @@ -5962,6 +6449,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|h| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|h1| |karpenter.k8s.aws/instance-generation|1| @@ -5985,6 +6474,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|h| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|h1| |karpenter.k8s.aws/instance-generation|1| @@ -6008,6 +6499,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|h| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|h1| |karpenter.k8s.aws/instance-generation|1| @@ -6032,6 +6525,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|hpc| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|hpc7g| |karpenter.k8s.aws/instance-generation|7| @@ -6056,6 +6551,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|hpc| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|hpc7g| |karpenter.k8s.aws/instance-generation|7| @@ -6080,6 +6577,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|hpc| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|hpc7g| |karpenter.k8s.aws/instance-generation|7| @@ -6105,6 +6604,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i2| |karpenter.k8s.aws/instance-generation|2| @@ -6127,6 +6627,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i2| |karpenter.k8s.aws/instance-generation|2| @@ -6149,6 +6650,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i2| |karpenter.k8s.aws/instance-generation|2| @@ -6171,6 +6673,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i2| |karpenter.k8s.aws/instance-generation|2| @@ -6195,6 +6698,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|425| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6219,6 +6724,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|850| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6243,6 +6750,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1700| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6267,6 +6776,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6291,6 +6802,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6315,6 +6828,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6339,6 +6854,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|72| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|i3| |karpenter.k8s.aws/instance-generation|3| @@ -6365,6 +6882,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6390,6 +6909,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6415,6 +6936,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6440,6 +6963,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|12| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6465,6 +6990,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6490,6 +7017,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6516,6 +7045,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6542,6 +7073,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i3en| |karpenter.k8s.aws/instance-generation|3| @@ -6569,6 +7102,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6594,6 +7129,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6619,6 +7156,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6644,6 +7183,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6669,6 +7210,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6694,6 +7237,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4g| |karpenter.k8s.aws/instance-generation|4| @@ -6721,6 +7266,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6745,6 +7292,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6770,6 +7319,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6795,6 +7346,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6820,6 +7373,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6845,6 +7400,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6870,6 +7427,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6895,6 +7454,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6920,6 +7481,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6946,6 +7509,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|i| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|i4i| |karpenter.k8s.aws/instance-generation|4| @@ -6973,6 +7538,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -6998,6 +7565,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -7023,6 +7592,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -7048,6 +7619,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -7073,6 +7646,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -7098,6 +7673,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|im| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|im4gn| |karpenter.k8s.aws/instance-generation|4| @@ -7128,6 +7705,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf1| |karpenter.k8s.aws/instance-generation|1| @@ -7156,6 +7735,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf1| |karpenter.k8s.aws/instance-generation|1| @@ -7184,6 +7765,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf1| |karpenter.k8s.aws/instance-generation|1| @@ -7212,6 +7795,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| 
|karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf1| |karpenter.k8s.aws/instance-generation|1| @@ -7242,6 +7827,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf2| |karpenter.k8s.aws/instance-generation|2| @@ -7270,6 +7857,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf2| |karpenter.k8s.aws/instance-generation|2| @@ -7298,6 +7887,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf2| |karpenter.k8s.aws/instance-generation|2| @@ -7326,6 +7917,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|inf| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|inf2| |karpenter.k8s.aws/instance-generation|2| @@ -7352,6 +7945,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7377,6 +7972,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7402,6 +7999,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7427,6 +8026,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7452,6 +8053,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7477,6 +8080,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|is| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|is4gen| |karpenter.k8s.aws/instance-generation|4| @@ -7503,6 +8108,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m1| |karpenter.k8s.aws/instance-generation|1| @@ -7525,6 +8131,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m1| |karpenter.k8s.aws/instance-generation|1| @@ -7547,6 +8154,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m1| |karpenter.k8s.aws/instance-generation|1| @@ -7569,6 +8177,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m1| |karpenter.k8s.aws/instance-generation|1| @@ -7592,6 +8201,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m2| |karpenter.k8s.aws/instance-generation|2| @@ -7614,6 +8224,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m2| |karpenter.k8s.aws/instance-generation|2| @@ -7636,6 +8247,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m2| |karpenter.k8s.aws/instance-generation|2| @@ -7659,6 +8271,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m3| |karpenter.k8s.aws/instance-generation|3| @@ -7681,6 +8294,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m3| |karpenter.k8s.aws/instance-generation|3| @@ -7703,6 +8317,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m3| |karpenter.k8s.aws/instance-generation|3| @@ -7725,6 +8340,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m3| |karpenter.k8s.aws/instance-generation|3| @@ -7748,6 +8364,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|450| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7770,6 +8388,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7792,6 +8412,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7814,6 +8436,8 @@ below are the resources available with some assumptions and after the instance o 
|--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7836,6 +8460,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|40| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7859,6 +8485,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m4| |karpenter.k8s.aws/instance-generation|4| @@ -7883,6 +8511,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -7907,6 +8537,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -7931,6 +8563,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -7955,6 +8589,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -7979,6 +8615,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -8003,6 +8641,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -8027,6 +8667,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -8051,6 +8693,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -8075,6 +8719,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5| |karpenter.k8s.aws/instance-generation|5| @@ -8100,6 +8746,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8124,6 +8772,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8148,6 +8798,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8172,6 +8824,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8196,6 +8850,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| 
|karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8220,6 +8876,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8244,6 +8902,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8268,6 +8928,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|13750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5a| |karpenter.k8s.aws/instance-generation|5| @@ -8293,6 +8955,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8318,6 +8982,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8343,6 +9009,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8368,6 +9036,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8393,6 +9063,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8418,6 +9090,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8443,6 +9117,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8468,6 +9144,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|13750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5ad| |karpenter.k8s.aws/instance-generation|5| @@ -8494,6 +9172,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8519,6 +9199,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8544,6 +9226,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8569,6 +9253,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8594,6 +9280,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8619,6 +9307,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8644,6 +9334,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8669,6 +9361,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8694,6 +9388,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m5d| |karpenter.k8s.aws/instance-generation|5| @@ -8720,6 +9416,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8745,6 +9443,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8770,6 +9470,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8795,6 +9497,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8820,6 +9524,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8845,6 +9551,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8870,6 +9578,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8895,6 +9605,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8921,6 +9633,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5dn| |karpenter.k8s.aws/instance-generation|5| @@ -8948,6 +9662,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -8972,6 +9688,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -8996,6 +9714,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9020,6 +9740,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9044,6 +9766,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9068,6 +9792,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9092,6 +9818,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9116,6 +9844,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9141,6 +9871,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5n| |karpenter.k8s.aws/instance-generation|5| @@ -9167,6 +9899,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9191,6 +9925,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9215,6 +9951,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9239,6 +9977,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|12| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9263,6 +10003,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9287,6 +10029,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9312,6 +10056,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m5zn| |karpenter.k8s.aws/instance-generation|5| @@ -9338,6 +10084,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9362,6 +10110,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9386,6 +10136,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9410,6 +10162,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9434,6 +10188,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9458,6 +10214,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9482,6 +10240,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9506,6 +10266,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9530,6 +10292,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9554,6 +10318,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9579,6 +10345,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|192| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6a| |karpenter.k8s.aws/instance-generation|6| @@ -9605,6 +10373,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9629,6 +10399,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9653,6 +10425,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9677,6 +10451,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9701,6 +10477,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9725,6 +10503,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9749,6 +10529,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9773,6 +10555,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9797,6 +10581,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6g| |karpenter.k8s.aws/instance-generation|6| @@ -9822,6 +10608,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9847,6 +10635,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9872,6 +10662,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9897,6 +10689,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9922,6 +10716,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9947,6 +10743,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9972,6 +10770,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -9997,6 +10797,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -10022,6 +10824,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|m6gd| |karpenter.k8s.aws/instance-generation|6| @@ -10048,6 +10852,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10072,6 +10878,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10096,6 +10904,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10120,6 +10930,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10144,6 +10956,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10168,6 +10982,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10192,6 +11008,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10216,6 +11034,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10240,6 +11060,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10265,6 +11087,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6i| |karpenter.k8s.aws/instance-generation|6| @@ -10291,6 +11115,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10316,6 +11142,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10341,6 +11169,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10366,6 +11196,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10391,6 +11223,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10416,6 +11250,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10441,6 +11277,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10466,6 +11304,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10491,6 +11331,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10517,6 +11359,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6id| |karpenter.k8s.aws/instance-generation|6| @@ -10544,6 +11388,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10569,6 +11415,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10594,6 +11442,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10619,6 +11469,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10644,6 +11496,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10669,6 +11523,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|37500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10694,6 +11550,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|50000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10719,6 +11577,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|75000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10744,6 +11604,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10763,13 +11625,15 @@ below are the resources available with some assumptions and after the instance o |memory|480277Mi| |pods|394| |vpc.amazonaws.com/efa|2| - 
|vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ### `m6idn.metal` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6idn| |karpenter.k8s.aws/instance-generation|6| @@ -10789,7 +11653,7 @@ below are the resources available with some assumptions and after the instance o |memory|480277Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ## m6in Family ### `m6in.large` #### Labels @@ -10797,6 +11661,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10821,6 +11687,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10845,6 +11713,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10869,6 +11739,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10893,6 +11765,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10917,6 +11791,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|37500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10941,6 +11817,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| 
|karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|50000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10965,6 +11843,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|75000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -10989,6 +11869,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -11007,13 +11889,15 @@ below are the resources available with some assumptions and after the instance o |memory|480277Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ### `m6in.metal` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m6in| |karpenter.k8s.aws/instance-generation|6| @@ -11032,7 +11916,7 @@ below are the resources available with some assumptions and after the instance o |memory|480277Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ## m7a Family ### `m7a.medium` #### Labels @@ -11040,6 +11924,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11064,6 +11950,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11088,6 +11976,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11112,6 +12002,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| 
|karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11136,6 +12028,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11160,6 +12054,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11184,6 +12080,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11208,6 +12106,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11232,6 +12132,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11256,6 +12158,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11280,6 +12184,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11305,6 +12211,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| 
|karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7a| |karpenter.k8s.aws/instance-generation|7| @@ -11331,6 +12239,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11355,6 +12265,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11379,6 +12291,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11403,6 +12317,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11427,6 +12343,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11451,6 +12369,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11475,6 +12395,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11499,6 +12421,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11524,6 +12448,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7g| |karpenter.k8s.aws/instance-generation|7| @@ -11550,6 +12476,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11575,6 +12503,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11600,6 +12530,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11625,6 +12557,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11650,6 +12584,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11675,6 +12611,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11700,6 +12638,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11725,6 +12665,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| @@ -11751,12 +12693,15 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7gd| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|30000| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11769,6 +12714,7 @@ below are the resources available with some assumptions and after the instance o |memory|233962Mi| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## m7i Family ### `m7i.large` #### Labels @@ -11776,6 +12722,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11800,6 +12748,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11824,6 +12774,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11848,6 +12800,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11872,6 +12826,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11896,6 +12852,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11920,6 +12878,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11944,6 +12904,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11968,6 +12930,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -11992,6 +12956,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -12017,6 +12983,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| @@ -12043,6 +13011,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i-flex| |karpenter.k8s.aws/instance-generation|7| @@ -12067,6 +13037,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i-flex| |karpenter.k8s.aws/instance-generation|7| @@ -12091,6 +13063,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i-flex| |karpenter.k8s.aws/instance-generation|7| @@ -12115,6 +13089,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i-flex| |karpenter.k8s.aws/instance-generation|7| @@ -12139,6 +13115,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|m| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|m7i-flex| |karpenter.k8s.aws/instance-generation|7| @@ -12164,6 +13142,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p2| |karpenter.k8s.aws/instance-generation|2| @@ -12191,6 +13171,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|5000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p2| |karpenter.k8s.aws/instance-generation|2| @@ -12219,6 +13201,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p2| |karpenter.k8s.aws/instance-generation|2| @@ -12248,6 +13232,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p3| |karpenter.k8s.aws/instance-generation|3| @@ -12275,6 +13261,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p3| |karpenter.k8s.aws/instance-generation|3| @@ -12303,6 +13291,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|p3| |karpenter.k8s.aws/instance-generation|3| @@ -12332,6 +13322,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|p3dn| |karpenter.k8s.aws/instance-generation|3| @@ -12364,6 +13356,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|p4d| |karpenter.k8s.aws/instance-generation|4| @@ -12396,6 +13390,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|p| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|p5| |karpenter.k8s.aws/instance-generation|5| @@ -12428,6 +13424,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r3| |karpenter.k8s.aws/instance-generation|3| @@ -12450,6 +13447,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r3| |karpenter.k8s.aws/instance-generation|3| @@ -12472,6 +13470,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r3| |karpenter.k8s.aws/instance-generation|3| @@ -12494,6 +13493,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r3| |karpenter.k8s.aws/instance-generation|3| @@ -12516,6 +13516,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r3| |karpenter.k8s.aws/instance-generation|3| @@ -12540,6 +13541,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|425| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12563,6 +13566,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|850| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12586,6 +13591,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1700| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12609,6 +13616,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12632,6 +13641,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12655,6 +13666,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r4| |karpenter.k8s.aws/instance-generation|4| @@ -12679,6 +13692,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12703,6 +13718,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12727,6 +13744,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12751,6 +13770,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12775,6 +13796,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12799,6 +13822,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12823,6 +13848,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12847,6 +13874,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12871,6 +13900,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5| |karpenter.k8s.aws/instance-generation|5| @@ -12896,6 +13927,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -12920,6 +13953,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -12944,6 +13979,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -12968,6 +14005,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -12992,6 +14031,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -13016,6 +14057,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -13040,6 +14083,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -13064,6 +14109,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|13570| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5a| |karpenter.k8s.aws/instance-generation|5| @@ -13089,6 +14136,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13114,6 +14163,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13139,6 +14190,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13164,6 +14217,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2880| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13189,6 +14244,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13214,6 +14271,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|6780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13239,6 +14298,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13264,6 +14325,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|13570| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5ad| |karpenter.k8s.aws/instance-generation|5| @@ -13290,6 +14353,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13314,6 +14379,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13338,6 +14405,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13362,6 +14431,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13386,6 +14457,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13410,6 +14483,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13434,6 +14509,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13458,6 +14535,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13482,6 +14561,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5b| |karpenter.k8s.aws/instance-generation|5| @@ -13507,6 +14588,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13532,6 +14615,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13557,6 +14642,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13582,6 +14669,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13607,6 +14696,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13632,6 +14723,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13657,6 +14750,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13682,6 +14777,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13707,6 +14804,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r5d| |karpenter.k8s.aws/instance-generation|5| @@ -13733,6 +14832,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13758,6 +14859,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13783,6 +14886,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13808,6 +14913,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13833,6 +14940,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13858,6 +14967,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13883,6 +14994,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13908,6 +15021,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13934,6 +15049,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5dn| |karpenter.k8s.aws/instance-generation|5| @@ -13961,6 +15078,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -13985,6 +15104,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14009,6 +15130,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14033,6 +15156,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14057,6 +15182,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|6800| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14081,6 +15208,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14105,6 +15234,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|13600| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14129,6 +15260,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14154,6 +15287,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r5n| |karpenter.k8s.aws/instance-generation|5| @@ -14180,6 +15315,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14204,6 +15341,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14228,6 +15367,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14252,6 +15393,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14276,6 +15419,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14300,6 +15445,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14324,6 +15471,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14348,6 +15497,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14372,6 +15523,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14396,6 +15549,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14421,6 +15576,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6a| |karpenter.k8s.aws/instance-generation|6| @@ -14447,6 +15604,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14471,6 +15630,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14495,6 +15656,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14519,6 +15682,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14543,6 +15708,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14567,6 +15734,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14591,6 +15760,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14615,6 +15786,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14639,6 +15812,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r6g| |karpenter.k8s.aws/instance-generation|6| @@ -14664,6 +15839,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14689,6 +15866,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14714,6 +15893,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14739,6 +15920,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14764,6 +15947,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14789,6 +15974,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14814,6 +16001,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14839,6 +16028,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| 
|karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14864,6 +16055,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|r6gd| |karpenter.k8s.aws/instance-generation|6| @@ -14890,6 +16083,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -14914,6 +16109,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -14938,6 +16135,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -14962,6 +16161,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -14986,6 +16187,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15010,6 +16213,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15034,6 +16239,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15058,6 +16265,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15082,6 +16291,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15107,6 +16318,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6i| |karpenter.k8s.aws/instance-generation|6| @@ -15133,6 +16346,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15158,6 +16373,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15183,6 +16400,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15208,6 +16427,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15233,6 +16454,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15258,6 +16481,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15283,6 +16508,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15308,6 +16535,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15333,6 +16562,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15359,6 +16590,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6id| |karpenter.k8s.aws/instance-generation|6| @@ -15386,6 +16619,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15411,6 +16646,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15436,6 +16673,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| 
|karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15461,6 +16700,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15486,6 +16727,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15511,6 +16754,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|37500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15536,6 +16781,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|50000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15561,6 +16808,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|75000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15586,6 +16835,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15605,13 +16856,15 @@ below are the resources available with some assumptions and after the instance o |memory|965243Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ### `r6idn.metal` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6idn| |karpenter.k8s.aws/instance-generation|6| @@ -15631,7 +16884,7 @@ below are the resources available with some assumptions and after the instance o |memory|965243Mi| |pods|394| |vpc.amazonaws.com/efa|2| - 
|vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ## r6in Family ### `r6in.large` #### Labels @@ -15639,6 +16892,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15663,6 +16918,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15687,6 +16944,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15711,6 +16970,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15735,6 +16996,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|25000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15759,6 +17022,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|37500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15783,6 +17048,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|50000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15807,6 +17074,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|75000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15831,6 +17100,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15849,13 +17120,15 @@ below are the resources available with some assumptions and after the instance o |memory|965243Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ### `r6in.metal` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r6in| |karpenter.k8s.aws/instance-generation|6| @@ -15874,7 +17147,7 @@ below are the resources available with some assumptions and after the instance o |memory|965243Mi| |pods|394| |vpc.amazonaws.com/efa|2| - |vpc.amazonaws.com/pod-eni|108| + |vpc.amazonaws.com/pod-eni|106| ## r7a Family ### `r7a.medium` #### Labels @@ -15882,6 +17155,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -15906,6 +17181,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -15930,6 +17207,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -15954,6 +17233,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -15978,6 +17259,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16002,6 +17285,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16026,6 +17311,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16050,6 +17337,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16074,6 +17363,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16098,6 +17389,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16122,6 +17415,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16147,6 +17442,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7a| |karpenter.k8s.aws/instance-generation|7| @@ -16173,6 +17470,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16197,6 +17496,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16221,6 +17522,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16245,6 +17548,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16269,6 +17574,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16293,6 +17600,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16317,6 +17626,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16341,6 +17652,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16366,6 +17679,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7g| |karpenter.k8s.aws/instance-generation|7| @@ -16392,6 +17707,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16417,6 +17734,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16442,6 +17761,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16467,6 +17788,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16492,6 +17815,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16517,6 +17842,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16542,6 +17869,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16567,6 +17896,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| @@ -16593,12 +17924,15 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7gd| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|30000| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16611,6 +17945,7 @@ below are the resources available with some assumptions and after the instance o |memory|476445Mi| |pods|737| |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## r7i Family ### `r7i.large` #### Labels @@ -16618,6 +17953,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16642,6 +17979,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16666,6 +18005,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16690,6 +18031,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16714,6 +18057,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16738,6 +18083,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| 
+ |karpenter.k8s.aws/instance-ebs-bandwidth|15000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16762,6 +18109,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16786,6 +18135,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16810,6 +18161,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16834,6 +18187,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16859,6 +18214,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| @@ -16885,6 +18242,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -16909,6 +18268,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -16933,6 +18294,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + 
|karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -16957,6 +18320,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -16981,6 +18346,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17005,6 +18372,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17029,6 +18398,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17053,6 +18424,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17077,6 +18450,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17102,6 +18477,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|r7iz| |karpenter.k8s.aws/instance-generation|7| @@ -17121,6 +18498,311 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| +## r8g Family +### `r8g.medium` +#### Labels + | Label | Value | + |--|--| + 
|karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|8192| + |karpenter.k8s.aws/instance-network-bandwidth|520| + |karpenter.k8s.aws/instance-size|medium| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.medium| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|940m| + |ephemeral-storage|17Gi| + |memory|7075Mi| + |pods|8| +### `r8g.large` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|16384| + |karpenter.k8s.aws/instance-network-bandwidth|937| + |karpenter.k8s.aws/instance-size|large| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.large| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|1930m| + |ephemeral-storage|17Gi| + |memory|14422Mi| + |pods|29| +### `r8g.xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|32768| + |karpenter.k8s.aws/instance-network-bandwidth|1876| + |karpenter.k8s.aws/instance-size|xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|3920m| + |ephemeral-storage|17Gi| + |memory|29258Mi| + |pods|58| +### `r8g.2xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|65536| + |karpenter.k8s.aws/instance-network-bandwidth|3750| + |karpenter.k8s.aws/instance-size|2xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.2xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|7910m| + |ephemeral-storage|17Gi| + |memory|59568Mi| + |pods|58| +### `r8g.4xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + 
|karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|7500| + |karpenter.k8s.aws/instance-size|4xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.4xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|15890m| + |ephemeral-storage|17Gi| + |memory|118253Mi| + |pods|234| +### `r8g.8xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|10000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|15000| + |karpenter.k8s.aws/instance-size|8xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.8xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|31850m| + |ephemeral-storage|17Gi| + |memory|239495Mi| + |pods|234| +### `r8g.12xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|15000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|22500| + |karpenter.k8s.aws/instance-size|12xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.12xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|47810m| + |ephemeral-storage|17Gi| + |memory|360736Mi| + |pods|234| +### `r8g.16xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|30000| + |karpenter.k8s.aws/instance-size|16xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.16xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|476445Mi| + |pods|737| +### `r8g.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|40000| + |karpenter.k8s.aws/instance-size|24xlarge| + 
|kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|718928Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.metal-24xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|30000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|40000| + |karpenter.k8s.aws/instance-size|metal-24xl| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.metal-24xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|718928Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.48xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|48xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.48xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446378Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +### `r8g.metal-48xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r8g| + |karpenter.k8s.aws/instance-generation|8| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r8g.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446378Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| ## t1 Family ### `t1.micro` #### Labels @@ -17128,6 +18810,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t1| |karpenter.k8s.aws/instance-generation|1| @@ -17151,6 +18834,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17173,6 +18857,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17195,6 +18880,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17217,6 +18903,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17239,6 +18926,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17261,6 +18949,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17283,6 +18972,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t2| |karpenter.k8s.aws/instance-generation|2| @@ -17306,6 +18996,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17329,6 +19021,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17352,6 +19046,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| 
|karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17375,6 +19071,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17398,6 +19096,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17421,6 +19121,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17444,6 +19146,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3| |karpenter.k8s.aws/instance-generation|3| @@ -17468,6 +19172,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17491,6 +19197,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17514,6 +19222,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17537,6 +19247,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17560,6 +19272,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17583,6 +19297,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17606,6 +19322,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|amd| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t3a| |karpenter.k8s.aws/instance-generation|3| @@ -17630,6 +19348,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17653,6 +19373,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17676,6 +19398,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17699,6 +19423,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2085| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17722,6 +19448,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|2| + 
|karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17745,6 +19473,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17768,6 +19498,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|t| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|2780| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t4g| |karpenter.k8s.aws/instance-generation|4| @@ -17795,6 +19527,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|trn| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|trn1| |karpenter.k8s.aws/instance-generation|1| @@ -17824,6 +19558,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|trn| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|trn1| |karpenter.k8s.aws/instance-generation|1| @@ -17855,6 +19591,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-accelerator-name|inferentia| |karpenter.k8s.aws/instance-category|trn| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|trn1n| |karpenter.k8s.aws/instance-generation|1| @@ -17883,6 +19621,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|448| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-12tb1| |karpenter.k8s.aws/instance-generation|1| @@ -17907,6 +19647,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|448| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-18tb1| |karpenter.k8s.aws/instance-generation|1| @@ -17931,6 +19673,8 @@ below are the resources 
available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|448| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-24tb1| |karpenter.k8s.aws/instance-generation|1| @@ -17955,6 +19699,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|224| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-3tb1| |karpenter.k8s.aws/instance-generation|1| @@ -17980,6 +19726,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|224| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-6tb1| |karpenter.k8s.aws/instance-generation|1| @@ -18003,6 +19751,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|448| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-6tb1| |karpenter.k8s.aws/instance-generation|1| @@ -18027,6 +19777,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|u| |karpenter.k8s.aws/instance-cpu|448| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|38000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|u-9tb1| |karpenter.k8s.aws/instance-generation|1| @@ -18044,6 +19796,114 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|8720933Mi| |pods|737| +## u7i-12tb Family +### `u7i-12tb.224xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|u| + |karpenter.k8s.aws/instance-cpu|896| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|u7i-12tb| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|12582912| + |karpenter.k8s.aws/instance-network-bandwidth|100000| + |karpenter.k8s.aws/instance-size|224xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|u7i-12tb.224xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|893690m| + |ephemeral-storage|17Gi| + |memory|11630731Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| +## u7in-16tb Family +### `u7in-16tb.224xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|u| + |karpenter.k8s.aws/instance-cpu|896| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| + 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|u7in-16tb| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|16777216| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|224xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|u7in-16tb.224xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|893690m| + |ephemeral-storage|17Gi| + |memory|15514235Mi| + |pods|394| + |vpc.amazonaws.com/efa|2| +## u7in-24tb Family +### `u7in-24tb.224xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|u| + |karpenter.k8s.aws/instance-cpu|896| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|u7in-24tb| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|25165824| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|224xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|u7in-24tb.224xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|893690m| + |ephemeral-storage|17Gi| + |memory|23273698Mi| + |pods|394| + |vpc.amazonaws.com/efa|2| +## u7in-32tb Family +### `u7in-32tb.224xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|u| + |karpenter.k8s.aws/instance-cpu|896| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|100000| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|u7in-32tb| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|33554432| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|224xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|u7in-32tb.224xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|893690m| + |ephemeral-storage|17Gi| + |memory|31033160Mi| + |pods|394| + |vpc.amazonaws.com/efa|2| ## vt1 Family ### `vt1.3xlarge` #### Labels @@ -18051,6 +19911,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|vt| |karpenter.k8s.aws/instance-cpu|12| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|vt1| |karpenter.k8s.aws/instance-generation|1| @@ -18075,6 +19937,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|vt| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|vt1| |karpenter.k8s.aws/instance-generation|1| @@ -18099,6 +19963,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|vt| 
|karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|vt1| |karpenter.k8s.aws/instance-generation|1| @@ -18125,6 +19991,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1| |karpenter.k8s.aws/instance-generation|1| @@ -18148,6 +20016,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1| |karpenter.k8s.aws/instance-generation|1| @@ -18172,6 +20042,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18195,6 +20067,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18218,6 +20092,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|1750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18241,6 +20117,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18264,6 +20142,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|7000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18287,6 +20167,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| 
|karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|14000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x1e| |karpenter.k8s.aws/instance-generation|1| @@ -18311,6 +20193,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18336,6 +20220,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18361,6 +20247,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18386,6 +20274,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18411,6 +20301,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18436,6 +20328,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18461,6 +20355,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|14250| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18486,6 +20382,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| 
|karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18511,6 +20409,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|aws| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|x2gd| |karpenter.k8s.aws/instance-generation|2| @@ -18537,6 +20437,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2idn| |karpenter.k8s.aws/instance-generation|2| @@ -18562,6 +20464,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2idn| |karpenter.k8s.aws/instance-generation|2| @@ -18587,6 +20491,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2idn| |karpenter.k8s.aws/instance-generation|2| @@ -18613,6 +20519,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2idn| |karpenter.k8s.aws/instance-generation|2| @@ -18640,6 +20548,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18665,6 +20575,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18690,6 +20602,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| 
|karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18715,6 +20629,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|20000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18740,6 +20656,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|40000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18765,6 +20683,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|60000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18790,6 +20710,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18816,6 +20738,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|80000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iedn| |karpenter.k8s.aws/instance-generation|2| @@ -18843,6 +20767,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18867,6 +20793,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18891,6 +20819,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18915,6 +20845,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|12000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18939,6 +20871,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18964,6 +20898,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|x| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| |karpenter.k8s.aws/instance-family|x2iezn| |karpenter.k8s.aws/instance-generation|2| @@ -18990,6 +20926,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19015,6 +20953,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19040,6 +20980,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|3170| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19065,6 +21007,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|12| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|4750| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19090,6 +21034,8 @@ below are the resources available with some assumptions and after the instance o |--|--| 
|karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|9500| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19115,6 +21061,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| @@ -19140,6 +21088,8 @@ below are the resources available with some assumptions and after the instance o |--|--| |karpenter.k8s.aws/instance-category|z| |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-cpu-manufacturer|intel| + |karpenter.k8s.aws/instance-ebs-bandwidth|19000| |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|z1d| |karpenter.k8s.aws/instance-generation|1| diff --git a/website/content/en/v1.0/reference/metrics.md b/website/content/en/v1.0/reference/metrics.md new file mode 100644 index 000000000000..674dc859179c --- /dev/null +++ b/website/content/en/v1.0/reference/metrics.md @@ -0,0 +1,310 @@ +--- +title: "Metrics" +linkTitle: "Metrics" +weight: 7 + +description: > + Inspect Karpenter Metrics +--- + +Karpenter makes several metrics available in Prometheus format to allow monitoring cluster provisioning status. These metrics are available by default at `karpenter.karpenter.svc.cluster.local:8080/metrics` configurable via the `METRICS_PORT` environment variable documented [here](../settings) +### `karpenter_build_info` +A metric with a constant '1' value labeled by version from which karpenter was built. +- Stability Level: STABLE + +## Nodeclaims Metrics + +### `karpenter_nodeclaims_termination_duration_seconds` +Duration of NodeClaim termination in seconds. +- Stability Level: BETA + +### `karpenter_nodeclaims_terminated_total` +Number of nodeclaims terminated in total by Karpenter. Labeled by the owning nodepool. +- Stability Level: STABLE + +### `karpenter_nodeclaims_instance_termination_duration_seconds` +Duration of CloudProvider Instance termination in seconds. +- Stability Level: BETA + +### `karpenter_nodeclaims_disrupted_total` +Number of nodeclaims disrupted in total by Karpenter. Labeled by reason the nodeclaim was disrupted and the owning nodepool. +- Stability Level: ALPHA + +### `karpenter_nodeclaims_created_total` +Number of nodeclaims created in total by Karpenter. Labeled by reason the nodeclaim was created and the owning nodepool. +- Stability Level: STABLE + +## Nodes Metrics + +### `karpenter_nodes_total_pod_requests` +Node total pod requests are the resources requested by pods bound to nodes, including the DaemonSet pods. +- Stability Level: BETA + +### `karpenter_nodes_total_pod_limits` +Node total pod limits are the resources specified by pod limits, including the DaemonSet pods. +- Stability Level: BETA + +### `karpenter_nodes_total_daemon_requests` +Node total daemon requests are the resource requested by DaemonSet pods bound to nodes. +- Stability Level: BETA + +### `karpenter_nodes_total_daemon_limits` +Node total daemon limits are the resources specified by DaemonSet pod limits. 
+- Stability Level: BETA + +### `karpenter_nodes_termination_duration_seconds` +The time taken between a node's deletion request and the removal of its finalizer +- Stability Level: BETA + +### `karpenter_nodes_terminated_total` +Number of nodes terminated in total by Karpenter. Labeled by owning nodepool. +- Stability Level: STABLE + +### `karpenter_nodes_system_overhead` +Node system daemon overhead are the resources reserved for system overhead, the difference between the node's capacity and allocatable values are reported by the status. +- Stability Level: BETA + +### `karpenter_nodes_leases_deleted_total` +Number of deleted leaked leases. +- Stability Level: ALPHA + +### `karpenter_nodes_created_total` +Number of nodes created in total by Karpenter. Labeled by owning nodepool. +- Stability Level: STABLE + +### `karpenter_nodes_allocatable` +Node allocatable are the resources allocatable by nodes. +- Stability Level: BETA + +## Pods Metrics + +### `karpenter_pods_state` +Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, nodepool name, zone, architecture, capacity type, instance type and pod phase. +- Stability Level: BETA + +### `karpenter_pods_startup_duration_seconds` +The time from pod creation until the pod is running. +- Stability Level: STABLE + +## Voluntary Disruption Metrics + +### `karpenter_voluntary_disruption_queue_failures_total` +The number of times that an enqueued disruption decision failed. Labeled by disruption method. +- Stability Level: BETA + +### `karpenter_voluntary_disruption_eligible_nodes` +Number of nodes eligible for disruption by Karpenter. Labeled by disruption reason. +- Stability Level: BETA + +### `karpenter_voluntary_disruption_decisions_total` +Number of disruption decisions performed. Labeled by disruption decision, reason, and consolidation type. +- Stability Level: STABLE + +### `karpenter_voluntary_disruption_decision_evaluation_duration_seconds` +Duration of the disruption decision evaluation process in seconds. Labeled by method and consolidation type. +- Stability Level: BETA + +### `karpenter_voluntary_disruption_consolidation_timeouts_total` +Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. +- Stability Level: BETA + +## Scheduler Metrics + +### `karpenter_scheduler_scheduling_duration_seconds` +Duration of scheduling simulations used for deprovisioning and provisioning in seconds. +- Stability Level: STABLE + +### `karpenter_scheduler_queue_depth` +The number of pods currently waiting to be scheduled. +- Stability Level: BETA + +## Nodepools Metrics + +### `karpenter_nodepools_usage` +The amount of resources that have been provisioned for a nodepool. Labeled by nodepool name and resource type. +- Stability Level: ALPHA + +### `karpenter_nodepools_limit` +Limits specified on the nodepool that restrict the quantity of resources provisioned. Labeled by nodepool name and resource type. +- Stability Level: ALPHA + +### `karpenter_nodepools_allowed_disruptions` +The number of nodes for a given NodePool that can be concurrently disrupting at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. +- Stability Level: ALPHA + +## Interruption Metrics + +### `karpenter_interruption_received_messages_total` +Count of messages received from the SQS queue. 
Broken down by message type and whether the message was actionable. +- Stability Level: STABLE + +### `karpenter_interruption_message_queue_duration_seconds` +Amount of time an interruption message is on the queue before it is processed by karpenter. +- Stability Level: STABLE + +### `karpenter_interruption_deleted_messages_total` +Count of messages deleted from the SQS queue. +- Stability Level: STABLE + +## Cluster State Metrics + +### `karpenter_cluster_state_synced` +Returns 1 if cluster state is synced and 0 otherwise. Synced checks that nodeclaims and nodes that are stored in the APIServer have the same representation as Karpenter's cluster state +- Stability Level: STABLE + +### `karpenter_cluster_state_node_count` +Current count of nodes in cluster state +- Stability Level: STABLE + +## Cloudprovider Metrics + +### `karpenter_cloudprovider_instance_type_offering_price_estimate` +Instance type offering estimated hourly price used when making informed decisions on node cost calculation, based on instance type, capacity type, and zone. +- Stability Level: BETA + +### `karpenter_cloudprovider_instance_type_offering_available` +Instance type offering availability, based on instance type, capacity type, and zone +- Stability Level: BETA + +### `karpenter_cloudprovider_instance_type_memory_bytes` +Memory, in bytes, for a given instance type. +- Stability Level: BETA + +### `karpenter_cloudprovider_instance_type_cpu_cores` +VCPUs cores for a given instance type. +- Stability Level: BETA + +### `karpenter_cloudprovider_errors_total` +Total number of errors returned from CloudProvider calls. +- Stability Level: BETA + +### `karpenter_cloudprovider_duration_seconds` +Duration of cloud provider method calls. Labeled by the controller, method name and provider. +- Stability Level: BETA + +## Cloudprovider Batcher Metrics + +### `karpenter_cloudprovider_batcher_batch_time_seconds` +Duration of the batching window per batcher +- Stability Level: BETA + +### `karpenter_cloudprovider_batcher_batch_size` +Size of the request batch per batcher +- Stability Level: BETA + +## Controller Runtime Metrics + +### `controller_runtime_terminal_reconcile_errors_total` +Total number of terminal reconciliation errors per controller +- Stability Level: STABLE + +### `controller_runtime_reconcile_total` +Total number of reconciliations per controller +- Stability Level: STABLE + +### `controller_runtime_reconcile_time_seconds` +Length of time per reconciliation per controller +- Stability Level: STABLE + +### `controller_runtime_reconcile_errors_total` +Total number of reconciliation errors per controller +- Stability Level: STABLE + +### `controller_runtime_max_concurrent_reconciles` +Maximum number of concurrent reconciles per controller +- Stability Level: STABLE + +### `controller_runtime_active_workers` +Number of currently used workers per controller +- Stability Level: STABLE + +## Workqueue Metrics + +### `workqueue_work_duration_seconds` +How long in seconds processing an item from workqueue takes. +- Stability Level: STABLE + +### `workqueue_unfinished_work_seconds` +How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases. 
+- Stability Level: STABLE + +### `workqueue_retries_total` +Total number of retries handled by workqueue +- Stability Level: STABLE + +### `workqueue_queue_duration_seconds` +How long in seconds an item stays in workqueue before being requested +- Stability Level: STABLE + +### `workqueue_longest_running_processor_seconds` +How many seconds has the longest running processor for workqueue been running. +- Stability Level: STABLE + +### `workqueue_depth` +Current depth of workqueue +- Stability Level: STABLE + +### `workqueue_adds_total` +Total number of adds handled by workqueue +- Stability Level: STABLE + +## Status Condition Metrics + +### `operator_status_condition_transitions_total` +The count of transitions of a given object, type and status. +- Stability Level: BETA + +### `operator_status_condition_transition_seconds` +The amount of time a condition was in a given state before transitioning. e.g. Alarm := P99(Updated=False) > 5 minutes +- Stability Level: BETA + +### `operator_status_condition_current_status_seconds` +The current amount of time in seconds that a status condition has been in a specific state. Alarm := P99(Updated=Unknown) > 5 minutes +- Stability Level: BETA + +### `operator_status_condition_count` +The number of an condition for a given object, type and status. e.g. Alarm := Available=False > 0 +- Stability Level: BETA + +## Client Go Metrics + +### `client_go_request_total` +Number of HTTP requests, partitioned by status code and method. +- Stability Level: STABLE + +### `client_go_request_duration_seconds` +Request latency in seconds. Broken down by verb, group, version, kind, and subresource. +- Stability Level: STABLE + +## AWS SDK Go Metrics + +### `aws_sdk_go_request_total` +The total number of AWS SDK Go requests +- Stability Level: STABLE + +### `aws_sdk_go_request_retry_count` +The total number of AWS SDK Go retry attempts per request +- Stability Level: STABLE + +### `aws_sdk_go_request_duration_seconds` +Latency of AWS SDK Go requests +- Stability Level: STABLE + +### `aws_sdk_go_request_attempt_total` +The total number of AWS SDK Go request attempts +- Stability Level: STABLE + +### `aws_sdk_go_request_attempt_duration_seconds` +Latency of AWS SDK Go request attempts +- Stability Level: STABLE + +## Leader Election Metrics + +### `leader_election_slowpath_total` +Total number of slow path exercised in renewing leader leases. 'name' is the string used to identify the lease. Please make sure to group by name. +- Stability Level: STABLE + +### `leader_election_master_status` +Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. +- Stability Level: STABLE + diff --git a/website/content/en/v0.35/reference/settings.md b/website/content/en/v1.0/reference/settings.md similarity index 84% rename from website/content/en/v0.35/reference/settings.md rename to website/content/en/v1.0/reference/settings.md index c7dbc250391f..799f5af9ef0a 100644 --- a/website/content/en/v0.35/reference/settings.md +++ b/website/content/en/v1.0/reference/settings.md @@ -12,26 +12,26 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf | Environment Variable | CLI Flag | Description | |--|--|--| -| ASSUME_ROLE_ARN | \-\-assume-role-arn | Role to assume for calling AWS services.| -| ASSUME_ROLE_DURATION | \-\-assume-role-duration | Duration of assumed credentials in minutes. Default value is 15 minutes. 
Not used unless aws.assumeRole set. (default = 15m0s)| | BATCH_IDLE_DURATION | \-\-batch-idle-duration | The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods will be batched separately. (default = 1s)| | BATCH_MAX_DURATION | \-\-batch-max-duration | The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one time which usually results in fewer but larger nodes. (default = 10s)| | CLUSTER_CA_BUNDLE | \-\-cluster-ca-bundle | Cluster CA bundle for nodes to use for TLS connections with the API server. If not set, this is taken from the controller's TLS configuration.| | CLUSTER_ENDPOINT | \-\-cluster-endpoint | The external kubernetes cluster endpoint for new nodes to connect with. If not specified, will discover the cluster endpoint using DescribeCluster API.| | CLUSTER_NAME | \-\-cluster-name | [REQUIRED] The kubernetes cluster name for resource discovery.| -| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the admission and validation webhooks| +| DISABLE_LEADER_ELECTION | \-\-disable-leader-election | Disable the leader election client before executing the main loop. Disable when running replicated components for high availability is not desired.| +| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the conversion webhooks| | ENABLE_PROFILING | \-\-enable-profiling | Enable the profiling on the metric endpoint| -| FEATURE_GATES | \-\-feature-gates | Optional features can be enabled / disabled using feature gates. Current options are: Drift,SpotToSpotConsolidation (default = Drift=true,SpotToSpotConsolidation=false)| +| FEATURE_GATES | \-\-feature-gates | Optional features can be enabled / disabled using feature gates. Current options are: SpotToSpotConsolidation (default = SpotToSpotConsolidation=false)| | HEALTH_PROBE_PORT | \-\-health-probe-port | The port the health probe endpoint binds to for reporting controller health (default = 8081)| -| INTERRUPTION_QUEUE | \-\-interruption-queue | Interruption queue is disabled if not specified. Enabling interruption handling may require additional permissions on the controller service account. Additional permissions are outlined in the docs.| -| ISOLATED_VPC | \-\-isolated-vpc | If true, then assume we can't reach AWS services which don't have a VPC endpoint. This also has the effect of disabling look-ups to the AWS pricing endpoint.| +| INTERRUPTION_QUEUE | \-\-interruption-queue | Interruption queue is the name of the SQS queue used for processing interruption events from EC2. Interruption handling is disabled if not specified. Enabling interruption handling may require additional permissions on the controller service account. Additional permissions are outlined in the docs.| +| ISOLATED_VPC | \-\-isolated-vpc | If true, then assume we can't reach AWS services which don't have a VPC endpoint. 
This also has the effect of disabling look-ups to the AWS on-demand pricing endpoint.| | KARPENTER_SERVICE | \-\-karpenter-service | The Karpenter Service name for the dynamic webhook certificate| | KUBE_CLIENT_BURST | \-\-kube-client-burst | The maximum allowed burst of queries to the kube-apiserver (default = 300)| | KUBE_CLIENT_QPS | \-\-kube-client-qps | The smoothed rate of qps to kube-apiserver (default = 200)| -| LEADER_ELECT | \-\-leader-elect | Start leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.| +| LOG_ERROR_OUTPUT_PATHS | \-\-log-error-output-paths | Optional comma separated paths for logging error output (default = stderr)| | LOG_LEVEL | \-\-log-level | Log verbosity level. Can be one of 'debug', 'info', or 'error' (default = info)| +| LOG_OUTPUT_PATHS | \-\-log-output-paths | Optional comma separated paths for directing log output (default = stdout)| | MEMORY_LIMIT | \-\-memory-limit | Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value. (default = -1)| -| METRICS_PORT | \-\-metrics-port | The port the metric endpoint binds to for operating metrics about the controller itself (default = 8000)| +| METRICS_PORT | \-\-metrics-port | The port the metric endpoint binds to for operating metrics about the controller itself (default = 8080)| | RESERVED_ENIS | \-\-reserved-enis | Reserved ENIs are not included in the calculations for max-pods or kube-reserved. This is most often used in the VPC CNI custom networking setup https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html. (default = 0)| | VM_MEMORY_OVERHEAD_PERCENT | \-\-vm-memory-overhead-percent | The VM memory overhead as a percent that will be subtracted from the total memory for all instance types. (default = 0.075)| | WEBHOOK_METRICS_PORT | \-\-webhook-metrics-port | The port the webhook metric endpoing binds to for operating metrics about the webhook (default = 8001)| @@ -47,7 +47,7 @@ Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line |-------------------------|---------|-------|---------|---------| | Drift | false | Alpha | v0.21.x | v0.32.x | | Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Alpha | v0.34.x | | +| SpotToSpotConsolidation | false | Alpha | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/v0.35/reference/threat-model.md b/website/content/en/v1.0/reference/threat-model.md similarity index 96% rename from website/content/en/v0.35/reference/threat-model.md rename to website/content/en/v1.0/reference/threat-model.md index 5f932e72127b..8625ca478002 100644 --- a/website/content/en/v0.35/reference/threat-model.md +++ b/website/content/en/v1.0/reference/threat-model.md @@ -31,11 +31,11 @@ A Cluster Developer has the ability to create pods via `Deployments`, `ReplicaSe Karpenter has permissions to create and manage cloud instances. Karpenter has Kubernetes API permissions to create, update, and remove nodes, as well as evict pods. For a full list of the permissions, see the RBAC rules in the helm chart template. Karpenter also has AWS IAM permissions to create instances with IAM roles. 
-* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/aggregate-clusterrole.yaml) -* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/clusterrole-core.yaml) -* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/clusterrole.yaml) -* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/rolebinding.yaml) -* [role.yaml](https://github.com/aws/karpenter/blob/v0.35.5/charts/karpenter/templates/role.yaml) +* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/aggregate-clusterrole.yaml) +* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole-core.yaml) +* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/clusterrole.yaml) +* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/rolebinding.yaml) +* [role.yaml](https://github.com/aws/karpenter/blob/v1.0.0/charts/karpenter/templates/role.yaml) ## Assumptions diff --git a/website/content/en/v0.35/tasks/_index.md b/website/content/en/v1.0/tasks/_index.md similarity index 100% rename from website/content/en/v0.35/tasks/_index.md rename to website/content/en/v1.0/tasks/_index.md diff --git a/website/content/en/v0.35/tasks/managing-amis.md b/website/content/en/v1.0/tasks/managing-amis.md similarity index 97% rename from website/content/en/v0.35/tasks/managing-amis.md rename to website/content/en/v1.0/tasks/managing-amis.md index 1ef31f141e08..47d2b3bab9b1 100644 --- a/website/content/en/v0.35/tasks/managing-amis.md +++ b/website/content/en/v1.0/tasks/managing-amis.md @@ -58,7 +58,7 @@ For example, you could have: * **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following: ```yaml - apiVersion: karpenter.sh/v1beta1 + apiVersion: karpenter.sh/v1 kind: NodePool metadata: name: default @@ -66,11 +66,11 @@ For example, you could have: template: spec: nodeClassRef: - apiVersion: karpenter.k8s.aws/v1beta1 + apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass name: default --- - apiVersion: karpenter.k8s.aws/v1beta1 + apiVersion: karpenter.k8s.aws/v1 kind: EC2NodeClass metadata: name: default @@ -120,9 +120,11 @@ You can set Disruption Budgets in a `NodePool` spec. Here is an example: ```yaml +template: + spec: + expireAfter: 1440h disruption: consolidationPolicy: WhenEmpty - expireAfter: 1440h budgets: - nodes: 15% - nodes: "3" @@ -132,7 +134,7 @@ disruption: ``` The `disruption` settings define a few fields that indicate the state of a node that should be disrupted. -The `consolidationPolicy` field indicates that a node should be disrupted if the node is either underutilized (`WhenUnderutilized`) or not running any pods (`WhenEmpty`). +The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`). With `expireAfter` set to `1440` hours, the node expires after 60 days. Extending those values causes longer times without disruption. 
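After applying a change like this, it can be worth reading the merged values back from the cluster to confirm what Karpenter will actually enforce. A minimal check, assuming a NodePool named `default` (substitute your own name):

```bash
# Read back the consolidation policy, disruption budgets, and expiry Karpenter will use.
kubectl get nodepool default -o jsonpath='{.spec.disruption.consolidationPolicy}{"\n"}'
kubectl get nodepool default -o jsonpath='{.spec.disruption.budgets}{"\n"}'
kubectl get nodepool default -o jsonpath='{.spec.template.spec.expireAfter}{"\n"}'
```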
diff --git a/website/content/en/v0.35/troubleshooting.md b/website/content/en/v1.0/troubleshooting.md similarity index 85% rename from website/content/en/v0.35/troubleshooting.md rename to website/content/en/v1.0/troubleshooting.md index 6b362e504e86..c51d4f341ef5 100644 --- a/website/content/en/v0.35/troubleshooting.md +++ b/website/content/en/v1.0/troubleshooting.md @@ -75,12 +75,12 @@ If a long cluster name causes the Karpenter node role name to exceed 64 characte Keep in mind that `KarpenterNodeRole-` is just a recommendation from the getting started guide. Instead of using the eksctl role, you can shorten the name to anything you like, as long as it has the right permissions. -### Unknown field in Provisioner spec +### Unknown field in NodePool or EC2NodeClass spec If you are upgrading from an older version of Karpenter, there may have been changes in the CRD between versions. Attempting to utilize newer functionality which is surfaced in newer versions of the CRD may result in the following error message: ``` -error: error validating "STDIN": error validating data: ValidationError(Provisioner.spec): unknown field "" in sh.karpenter.v1alpha5.Provisioner.spec; if you choose to ignore these errors, turn validation off with --validate=false +Error from server (BadRequest): error when creating "STDIN": NodePool in version "v1" cannot be handled as a NodePool: strict decoding error: unknown field "spec.template.spec.nodeClassRef.foo" ``` If you see this error, you can solve the problem by following the [Custom Resource Definition Upgrade Guidance](../upgrade-guide/#custom-resource-definition-crd-upgrades). @@ -91,11 +91,10 @@ Info on whether there has been a change to the CRD between versions of Karpenter `0.16.0` changed the default replicas from 1 to 2. -Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`) +Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/nodepool DoesNotExist requirement`) so it can't provision for the second Karpenter pod. -To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter -(these are instances with the name `karpenter.sh/provisioner-name/`) to run both pods. +To solve this you can either reduce the replicas back from 2 to 1, or ensure there is enough capacity that isn't being managed by Karpenter to run both pods. To do so on AWS increase the `minimum` and `desired` parameters on the node group autoscaling group to launch at lease 2 instances. @@ -144,52 +143,6 @@ You can fix this by patching the node objects: kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{"\n"}' | grep "karpenter.sh/termination" | cut -d ':' -f 1 | xargs kubectl patch node --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' ``` -## Webhooks - -### Failed calling webhook "validation.webhook.provisioners.karpenter.sh" - -If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` - -Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. - -Delete the stale webhooks. 
- -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.provisioners.karpenter.sh -kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.karpenter.sh -``` - -### Failed calling webhook "defaulting.webhook.karpenter.sh" - -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. - -```text -kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh -``` - -If you are not able to create a provisioner due to `Error from server (InternalError): error when creating "provisioner.yaml": Internal error occurred: failed calling webhook "defaulting.webhook.karpenter.sh": Post "https://karpenter-webhook.karpenter.svc:443/default-resource?timeout=10s": context deadline exceeded` - -Verify that the karpenter pod is running (should see 2/2 containers with a "Ready" status) - -```text -kubectl get po -A -l app.kubernetes.io/name=karpenter -NAME READY STATUS RESTARTS AGE -karpenter-7b46fb5c-gcr9z 2/2 Running 0 17h -``` - -Karpenter service has endpoints assigned to it - -```text -kubectl get ep -A -l app.kubernetes.io/name=karpenter -NAMESPACE NAME ENDPOINTS AGE -karpenter karpenter 192.168.39.88:8443,192.168.39.88:8080 16d -``` - -Your security groups are not blocking you from reaching your webhook. - -This is especially relevant if you have used `terraform-eks-module` version `>=18` since that version changed its security -approach, and now it's much more restrictive. - ## Provisioning ### Instances with swap volumes fail to register with control plane @@ -201,7 +154,7 @@ Some instance types (c1.medium and m1.small) are given limited amount of memory ``` ##### Solutions -Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the Provisioner requirements to use larger instance types. +Disabling swap will allow kubelet to join the cluster successfully, however users should be mindful of performance, and consider adjusting the NodePool requirements to use larger instance types. ### DaemonSets can result in deployment failures @@ -209,7 +162,7 @@ For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly conside This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. +If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your NodePool to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -224,55 +177,24 @@ This behavior is not unique to Karpenter and can also occur with the standard `k To prevent this, you can set LimitRanges on pod deployments on a per-namespace basis. 
See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-practices/karpenter/#use-limitranges-to-configure-defaults-for-resource-requests-and-limits) for further information on the use of LimitRanges. -### Missing subnetSelector and securityGroupSelector tags causes provisioning failures - -Starting with Karpenter `0.5.5`, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. -The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: - -```text -kind: AWSNodeTemplate -spec: - subnetSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} - securityGroupSelector: - karpenter.sh/discovery: ${CLUSTER_NAME} -``` - -To check your subnet and security group selectors, type the following: - -```bash -aws ec2 describe-subnets --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns subnets matching the selector* - -```bash -aws ec2 describe-security-groups --filters Name=tag:karpenter.sh/discovery,Values=${CLUSTER_NAME} -``` - -*Returns security groups matching the selector* - -Provisioners created without those tags and run in more recent Karpenter versions will fail with this message when you try to run the provisioner: - -```text - field(s): spec.provider.securityGroupSelector, spec.provider.subnetSelector -``` - ### Pods using Security Groups for Pods stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running" -When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). +When leveraging [Security Groups for Pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html), Karpenter will launch nodes as expected but pods will be stuck in "ContainerCreating" state for up to 30 minutes before transitioning to "Running". +This is related to an interaction between Karpenter and the [amazon-vpc-resource-controller](https://github.com/aws/amazon-vpc-resource-controller-k8s) when a pod requests `vpc.amazonaws.com/pod-eni` resources. +More info can be found in [issue #1252](https://github.com/aws/karpenter/issues/1252). -To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter Provisioner spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). +To workaround this problem, add the `vpc.amazonaws.com/has-trunk-attached: "false"` label in your Karpenter NodePool spec and ensure instance-type requirements include [instance-types which support ENI trunking](https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go). 
```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner +apiVersion: karpenter.sh/v1 +kind: NodePool metadata: name: default spec: - labels: - vpc.amazonaws.com/has-trunk-attached: "false" - ttlSecondsAfterEmpty: 30 + template: + metadata: + labels: + vpc.amazonaws.com/has-trunk-attached: "false" ``` ### Pods using PVCs can hit volume limits and fail to scale-up @@ -305,7 +227,7 @@ The following is a list of known CSI drivers which support a startupTaint to eli These taints should be configured via `startupTaints` on your `NodePool`. For example, to enable this for EBS, add the following to your `NodePool`: ```yaml -apiVersion: karpenter.sh/v1beta1 +apiVersion: karpenter.sh/v1 kind: NodePool spec: template: @@ -329,7 +251,7 @@ time=2023-06-12T19:18:15Z type=Warning reason=FailedCreatePodSandBox from=kubele By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for a more detailed information on these instance types' limits. -If the max-pods (configured through your Provisioner [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state. +If the max-pods (configured through your EC2NodeClass [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state. If you've enabled [Security Groups per Pod](https://aws.github.io/aws-eks-best-practices/networking/sgpp/), one of the instance's ENIs is reserved as the trunk interface and uses branch interfaces off of that trunk interface to assign different security groups. If you do not have any `SecurityGroupPolicies` configured for your pods, they will be unable to utilize branch interfaces attached to the trunk interface, and IPs will only be available from the non-trunk ENIs. @@ -341,19 +263,19 @@ Note that Karpenter is not aware if [Security Groups per Pod](https://aws.github To avoid this discrepancy between `maxPods` and the supported pod density of the EC2 instance based on ENIs and allocatable IPs, you can perform one of the following actions on your cluster: 1. Enable [Prefix Delegation](https://www.eksworkshop.com/docs/networking/prefix/) to increase the number of allocatable IPs for the ENIs on each instance type -2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your Provisioner -3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs. +2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your NodePools +3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs. -For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}). +For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}).
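To see where the ENI-based limit for a particular instance type comes from, you can compare the EC2 interface limits with the pod capacity the node actually advertises. A rough sketch, using `m5.large` purely as an example:

```bash
# Without prefix delegation, ENI-based pod capacity is roughly:
#   ENIs * (IPv4 addresses per ENI - 1) + 2
aws ec2 describe-instance-types --instance-types m5.large \
  --query 'InstanceTypes[0].NetworkInfo.[MaximumNetworkInterfaces, Ipv4AddressesPerInterface]' \
  --output text

# Compare with the pod capacity advertised by Karpenter-managed nodes.
kubectl get nodes -l karpenter.sh/nodepool \
  -o custom-columns='NODE:.metadata.name,PODS:.status.allocatable.pods'
```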
#### IP exhaustion in a subnet -When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`AWSNodeTemplate`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. +When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`EC2NodeClass`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. ##### Solutions 1. Use `topologySpreadConstraints` on `topology.kubernetes.io/zone` to spread your pods and nodes more evenly across zones -2. Increase the IP address space (CIDR) for the subnets selected by your `AWSNodeTemplate` +2. Increase the IP address space (CIDR) for the subnets selected by your `EC2NodeClass` 3. Use [custom networking](https://www.eksworkshop.com/docs/networking/custom-networking/) to assign separate IP address spaces to your pods and your nodes 4. [Run your EKS cluster on IPv6](https://aws.github.io/aws-eks-best-practices/networking/ipv6/) (Note: IPv6 clusters have some known limitations which should be well-understood before choosing to use one) @@ -479,7 +401,7 @@ Karpenter determines node initialization using three factors: 1. Node readiness 2. Expected resources are registered -3. Provisioner startup taints are removed +3. NodePool startup taints are removed #### Node Readiness @@ -496,9 +418,9 @@ Common resources that don't register and leave nodes in a non-initialized state: 1. `nvidia.com/gpu` (or any gpu-based resource): A GPU instance type that supports the `nvidia.com/gpu` resource is launched but the daemon/daemonset to register the resource on the node doesn't exist 2. `vpc.amazonaws.com/pod-eni`: An instance type is launched by the `ENABLE_POD_ENI` value is set to `false` in the `vpc-cni` plugin. Karpenter will expect that the `vpc.amazonaws.com/pod-eni` will be registered, but it never will. -#### Provisioner startup taints are removed +#### NodePool startup taints are removed -Karpenter expects all startup taints specified in `.spec.startupTaints` of the provisioner to be completely removed from node `.spec.taints` before it will consider the node initialized. +Karpenter expects all startup taints specified in `.spec.template.spec.startupTaints` of the NodePool to be completely removed from node `.spec.taints` before it will consider the node initialized. 
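When a node appears stuck in a non-initialized state, a quick way to narrow down which of the three factors above is the cause is to inspect the node's taints and allocatable resources directly. A minimal sketch:

```bash
# Any startupTaints still listed here will keep Karpenter from considering the node initialized.
kubectl get nodes -l karpenter.sh/nodepool \
  -o custom-columns='NODE:.metadata.name,TAINTS:.spec.taints[*].key'

# Verify that the extended resources Karpenter expects (e.g. nvidia.com/gpu,
# vpc.amazonaws.com/pod-eni) actually appear under allocatable.
kubectl get nodes -l karpenter.sh/nodepool \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.allocatable}{"\n"}{end}'
```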
### Node NotReady @@ -513,7 +435,7 @@ The easiest way to start debugging is to connect to the instance and get the Kub ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -526,7 +448,7 @@ For Bottlerocket, you'll need to get access to the root filesystem: ```bash # List the nodes managed by Karpenter -kubectl get node -l karpenter.sh/provisioner-name +kubectl get node -l karpenter.sh/nodepool # Extract the instance ID (replace with a node name from the above listing) INSTANCE_ID=$(kubectl get node -ojson | jq -r ".spec.providerID" | cut -d \/ -f5) # Connect to the instance @@ -613,7 +535,7 @@ This means that your CNI plugin is out of date. You can find instructions on how ### Node terminates before ready on failed encrypted EBS volume If you are using a custom launch template and an encrypted EBS volume, the IAM principal launching the node may not have sufficient permissions to use the KMS customer managed key (CMK) for the EC2 EBS root volume. -This issue also applies to [Block Device Mappings]({{}}) specified in the Provisioner. +This issue also applies to [Block Device Mappings]({{}}) specified in the EC2NodeClass. In either case, this results in the node terminating almost immediately upon creation. Keep in mind that it is possible that EBS Encryption can be enabled without your knowledge. diff --git a/website/content/en/v0.35/upgrading/_index.md b/website/content/en/v1.0/upgrading/_index.md similarity index 100% rename from website/content/en/v0.35/upgrading/_index.md rename to website/content/en/v1.0/upgrading/_index.md diff --git a/website/content/en/v0.35/upgrading/compatibility.md b/website/content/en/v1.0/upgrading/compatibility.md similarity index 95% rename from website/content/en/v0.35/upgrading/compatibility.md rename to website/content/en/v1.0/upgrading/compatibility.md index f948450e8319..01d6b0541c37 100644 --- a/website/content/en/v0.35/upgrading/compatibility.md +++ b/website/content/en/v1.0/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | -|------------|----------|----------|----------|----------|----------|----------|------------| -| karpenter | \>= 0.21 | \>= 0.21 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34.0 | +| KUBERNETES | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | +|------------|---------------------|----------|----------|----------|----------|----------|------------| +| karpenter | \>= 0.21 \<= 0.37 | \>= 0.25 | \>= 0.28 | \>= 0.28 | \>= 0.31 | \>= 0.34 | \>= 0.37 | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) diff --git a/website/content/en/v0.35/upgrading/upgrade-guide.md b/website/content/en/v1.0/upgrading/upgrade-guide.md similarity index 85% rename from website/content/en/v0.35/upgrading/upgrade-guide.md rename to website/content/en/v1.0/upgrading/upgrade-guide.md index 89c67d12d222..a4dcaf3ad1c1 100644 --- a/website/content/en/v0.35/upgrading/upgrade-guide.md +++ b/website/content/en/v1.0/upgrading/upgrade-guide.md @@ -10,6 +10,10 @@ Karpenter is a controller that runs in your cluster, but it 
is not tied to a spe Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes and keep Karpenter up to date on bug fixes and new features. This guide contains information needed to upgrade to the latest release of Karpenter, along with compatibility issues you need to be aware of when upgrading from earlier Karpenter versions. +{{% alert title="Warning" color="warning" %}} +With the release of Karpenter v1.0.0, the Karpenter team will be dropping support for karpenter versions v0.32 and below. We recommend upgrading to the latest version of Karpenter and keeping Karpenter up-to-date for bug fixes and new features. +{{% /alert %}} + ### CRD Upgrades Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published: @@ -20,23 +24,84 @@ Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are pu ``` {{% alert title="Note" color="warning" %}} -If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} * As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. -In general, you can reapply the CRDs in the `crds` directory of the Karpenter Helm chart: - -```shell -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.5/pkg/apis/crds/karpenter.sh_nodepools.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.5/pkg/apis/crds/karpenter.sh_nodeclaims.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.5/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml -``` - +### Upgrading to `1.0.0`+ + +{{% alert title="Warning" color="warning" %}} +Karpenter `1.0.0` introduces v1 APIs, including _significant_ changes to the API and upgrade procedures for the Karpenter controllers. **Do not** upgrade to `1.0.0`+ without referencing the [v1 Migration Upgrade Procedure]({{}}). + +This version adds [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to automatically pull the v1 API version of previously applied v1beta1 NodePools, EC2NodeClasses, and NodeClaims. Karpenter will stop serving the v1beta1 API version at v1.1.0 and will drop the conversion webhooks at that time. Migrate all stored manifests to v1 API versions on Karpenter v1.0+. +{{% /alert %}} + +Below is the full changelog for v1, copied from the [v1 Migration Upgrade Procedure]({{}}). 
+ +* Features: + * AMI Selector Terms has a new Alias field which can only be set by itself in `EC2NodeClass.Spec.AMISelectorTerms` + * Disruption Budgets by Reason was added to `NodePool.Spec.Disruption.Budgets` + * TerminationGracePeriod was added to `NodePool.Spec.Template.Spec`. + * LOG_OUTPUT_PATHS and LOG_ERROR_OUTPUT_PATHS environment variables added +* API Rename: NodePool’s ConsolidationPolicy `WhenUnderutilized` is now renamed to `WhenEmptyOrUnderutilized` +* Behavior Changes: + * Expiration is now forceful and begins draining as soon as it’s expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining. + * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields). + * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes. + * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up. +* API Moves: + * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field. + * `Kubelet` was moved to the EC2NodeClass from the NodePool. +* RBAC changes: added `delete pods` | added `get, patch crds` | added `update nodes` | removed `create nodes` +* Breaking API (Manual Migration Needed): + * Ubuntu is dropped as a first class supported AMI Family + * `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict` (annotation), and `karpenter.sh/managed-by` (tag) are all removed. `karpenter.sh/managed-by`, which currently stores the cluster name in its value, will be replaced by eks:eks-cluster-name + * The taint used to mark nodes for disruption and termination changed from `karpenter.sh/disruption=disrupting:NoSchedule` to `karpenter.sh/disrupted:NoSchedule`. It is not recommended to tolerate this taint; however, if you were tolerating it in your applications, you'll need to adjust your tolerations to reflect this. +* Environment Variable Changes: + * LOGGING_CONFIG, ASSUME_ROLE_ARN, ASSUME_ROLE_DURATION Dropped + * LEADER_ELECT renamed to DISABLE_LEADER_ELECTION + * `FEATURE_GATES.DRIFT=true` was dropped and promoted to Stable, and cannot be disabled. + * Users who currently opt out of drift by disabling the drift feature flag will no longer be able to do so. +* Defaults changed: + * API: Karpenter will drop support for IMDS access from containers by default on new EC2NodeClasses by updating the default of `httpPutResponseHopLimit` from 2 to 1. + * API: ConsolidateAfter is required. Users couldn’t set this before with ConsolidationPolicy: WhenUnderutilized, where this is now required. Users can set it to 0 to have the same behavior as in v1beta1. + * API: All `NodeClassRef` fields are now required, and apiVersion has been renamed to group + * API: AMISelectorTerms are required.
Setting an Alias cannot be done with any other type of term, and must match the AMI Family that's set or be Custom. + * Helm: The Deployment spec's TopologySpreadConstraint changed from preferred to required zonal spread. Users who were running their Karpenter deployment on a single node need to either: + * Have two nodes in different zones to ensure both Karpenter replicas schedule + * Scale down their Karpenter replicas from 2 to 1 in the helm chart + * Edit and relax the topology spread constraint in their helm chart from DoNotSchedule to ScheduleAnyway + * Helm/Binary: `controller.METRICS_PORT` default changed back to 8080 + +### Upgrading to `0.37.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.37.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +{{% /alert %}} + +* Karpenter now adds a readiness status condition to the EC2NodeClass. Make sure to upgrade your Custom Resource Definitions before proceeding with the upgrade. Failure to do so will result in Karpenter being unable to provision new nodes. +* Karpenter no longer updates the logger name when creating controller loggers. We now adhere to the controller-runtime standard, where the logger name will be set as `"logger": "controller"` always and the controller name will be stored in the structured value `"controller"` +* Karpenter updated the NodeClass controller naming in the following way: `nodeclass` -> `nodeclass.status`, `nodeclass.hash`, `nodeclass.termination` +* Karpenter's NodeClaim status conditions no longer include the `severity` field + +### Upgrading to `0.36.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.36.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +{{% /alert %}} + +{{% alert title="Warning" color="warning" %}} + v0.36.x introduces an update to drift that restricts rollback. When rolling back from >=v0.36.0, note that v0.32.9+, v0.33.4+, v0.34.5+, v0.35.4+ are the patch versions that support rollback. If Karpenter is rolled back to an older patch version, Karpenter can potentially drift all the nodes in the cluster. +{{% /alert %}} + +* Karpenter changed the name of the `karpenter_cloudprovider_instance_type_price_estimate` metric to `karpenter_cloudprovider_instance_type_offering_price_estimate` to align with the new `karpenter_cloudprovider_instance_type_offering_available` metric. The `region` label was also dropped from the metric, since this can be inferred from the environment that Karpenter is running in.
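If you have dashboards or alerting rules built on the old metric name, it may help to check both your monitoring configuration and the live metrics endpoint after upgrading. The `./monitoring` directory, the `karpenter` service name, the `kube-system` namespace, and port `8080` below are assumptions; adjust them to your setup (older versions default the metrics port to 8000):

```bash
# Find dashboards or recording/alerting rules that still reference the old metric name.
grep -R "karpenter_cloudprovider_instance_type_price_estimate" ./monitoring

# Confirm the renamed metric is exported by the controller.
kubectl -n kube-system port-forward svc/karpenter 8080:8080 &
curl -s localhost:8080/metrics | grep karpenter_cloudprovider_instance_type_offering_price_estimate
```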
+
### Upgrading to `0.35.0`+

{{% alert title="Warning" color="warning" %}}
diff --git a/website/content/en/v1.0/upgrading/v1-migration.md b/website/content/en/v1.0/upgrading/v1-migration.md
new file mode 100644
index 000000000000..69930b2ea9c1
--- /dev/null
+++ b/website/content/en/v1.0/upgrading/v1-migration.md
@@ -0,0 +1,428 @@
+---
+title: "v1 Migration"
+linkTitle: "v1 Migration"
+weight: 30
+description: >
+  Upgrade information for migrating to v1
+---
+
+This migration guide is designed to help you migrate Karpenter from the v1beta1 APIs (v0.33-v0.37) to v1.
+Use this document as a reference to the changes that were introduced in this release and as a guide to how you need to update the manifests and other Karpenter objects you created in previous Karpenter releases.
+
+Before you begin upgrading to `v1.0.0`, you should know that:
+
+* Every Karpenter upgrade from a pre-v1.0.0 version must go through minor version `v1.0.0`.
+* You must be upgrading to `v1.0.0` from a version of Karpenter that only supports v1beta1 APIs, i.e. NodePools, NodeClaims, and NodeClasses (v0.33+).
+* Karpenter `v1.0.0`+ supports Karpenter v1 and v1beta1 APIs and will not work with earlier Provisioner, AWSNodeTemplate or Machine v1alpha1 APIs. Do not upgrade to `v1.0.0`+ without first [upgrading to `0.32.x`]({{}}) or later and then upgrading to v0.33.
+* Version `v1.0.0` adds [conversion webhooks](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to automatically pull the v1 API version of previously applied v1beta1 NodePools, EC2NodeClasses, and NodeClaims. Karpenter will stop serving the v1beta1 API version at v1.1.0 and will drop the conversion webhooks at that time. You will need to migrate all stored manifests to v1 API versions on Karpenter v1.0+. Keep in mind that this is a conversion and not dual support, which means that resources are updated in-place rather than migrated over from the previous version.
+* If you need to roll back the upgrade to v1, you need to roll back to a special patch version of the minor version you came from, one that includes the conversion webhooks. For instance, if you came from v0.33.5, you'll need to roll back to v0.33.6 rather than v0.33.5. More details on how to do this are in [Downgrading]({{}}).
+* Validate that you are running at least Kubernetes 1.25. Use the [compatibility matrix]({{}}) to confirm you are on a supported Kubernetes version.
+* Karpenter runs a helm post-install-hook as part of upgrading to and from v1.0.0. If you're running Karpenter on a non-x86_64 node, you'll need to update your `values.postInstallHook.image` values in your helm `values.yaml` file to point to a compatible image that includes kubectl. For instance, [an ARM compatible version](https://hub.docker.com/layers/bitnami/kubectl/1.30/images/sha256-d63c6609dd5c336fd036bd303fd4ce5f272e73ddd1923d32c12d62b7149067ed?context=explore).
+
+See the [Changelog]({{}}) for details about actions you should take before upgrading to v1.0 or v1.1.
+
+## Upgrade Procedure
+
+Please read through the entire procedure before beginning the upgrade. There are major changes in this upgrade, so please evaluate the list of breaking changes before continuing.
+
+{{% alert title="Note" color="warning" %}}
+This procedure first requires upgrading to the latest patch release of the minor version you are currently running before upgrading to v1.0.0. This allows the conversion webhooks to operate and minimizes downtime of the Karpenter controller when requesting the Karpenter custom resources.
+{{% /alert %}}
+
+1. Set environment variables for your cluster to upgrade to the latest patch version of the current Karpenter version you're running on:
+
+    ```bash
+    export KARPENTER_NAMESPACE=kube-system
+    export AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov
+    export AWS_REGION="us-west-2"
+    export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
+    export CLUSTER_NAME="${USER}-karpenter-demo"
+    export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"
+    ```
+
+2. Determine the current Karpenter version:
+
+    ```bash
+    kubectl get pod -A | grep karpenter
+    kubectl describe pod -n "${KARPENTER_NAMESPACE}" karpenter-xxxxxxxxxx-xxxxx | grep Image:
+    ```
+
+    Sample output:
+
+    ```bash
+    Image: public.ecr.aws/karpenter/controller:0.37.1@sha256:157f478f5db1fe999f5e2d27badcc742bf51cc470508b3cebe78224d0947674f
+    ```
+
+    The Karpenter version you are running must be between minor version `v0.33` and `v0.37`. To be able to roll back from Karpenter v1, you must roll back to one of the following patch release versions for your minor version, which include the conversion webhooks for a smooth rollback:
+
+    * v0.37.1
+    * v0.36.3
+    * v0.35.6
+    * v0.34.7
+    * v0.33.6
+
+3. Review the breaking changes between v0.33 and v0.37: If you are already running Karpenter v0.37.x, you can skip this step. If you are running an earlier Karpenter version, you need to review the [Upgrade Guide]({{}}) for each minor release.
+
+4. Set environment variables for upgrading to the latest patch version. Note that `v0.33.6` and `v0.34.7` both need to include the v prefix, whereas `v0.35`+ should not.
+
+    ```bash
+    export KARPENTER_VERSION=
+    ```
+
+5. Apply the latest patch version of your current minor version's Custom Resource Definitions (CRDs):
+
+    ```bash
+    helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+        --set webhook.enabled=true \
+        --set webhook.serviceName=karpenter \
+        --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \
+        --set webhook.port=8443
+    ```
+
+6. Upgrade Karpenter to the latest patch version of your current minor version. At the end of this step, the conversion webhooks run but do not yet convert between API versions.
+
+    ```bash
+    # Service account annotation can be dropped when using pod identity
+    helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+      --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \
+      --set settings.clusterName=${CLUSTER_NAME} \
+      --set settings.interruptionQueue=${CLUSTER_NAME} \
+      --set controller.resources.requests.cpu=1 \
+      --set controller.resources.requests.memory=1Gi \
+      --set controller.resources.limits.cpu=1 \
+      --set controller.resources.limits.memory=1Gi \
+      --set webhook.enabled=true \
+      --set webhook.port=8443 \
+      --wait
+    ```
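+
+    Optionally, you can confirm that the conversion webhook is now wired into the Karpenter CRDs before moving on. This is a minimal sketch that assumes `kubectl` access to the cluster and the default CRD names; with `webhook.enabled=true`, each CRD should report a `Webhook` conversion strategy:
+
+    ```bash
+    # Print the conversion strategy configured on each Karpenter CRD
+    for crd in nodepools.karpenter.sh nodeclaims.karpenter.sh ec2nodeclasses.karpenter.k8s.aws; do
+      kubectl get crd "${crd}" -o jsonpath='{.metadata.name}{": "}{.spec.conversion.strategy}{"\n"}'
+    done
+    ```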
+
+7. Set environment variables for upgrading to v1.0.0:
+
+    ```bash
+    export KARPENTER_VERSION=1.0.0
+    ```
+
+8. Update your existing IAM policy to the v1.0.0 controller policy using the following commands. Notable changes to the IAM policy include additional tag-scoping for the `eks:eks-cluster-name` tag for instances and instance profiles.
+
+    ```bash
+    TEMPOUT=$(mktemp)
+    curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \
+        && aws cloudformation deploy \
+        --stack-name "Karpenter-${CLUSTER_NAME}" \
+        --template-file "${TEMPOUT}" \
+        --capabilities CAPABILITY_NAMED_IAM \
+        --parameter-overrides "ClusterName=${CLUSTER_NAME}"
+    ```
+
+9. Apply the v1.0.0 Custom Resource Definitions (CRDs):
+
+    ```bash
+    helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+        --set webhook.enabled=true \
+        --set webhook.serviceName=karpenter \
+        --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \
+        --set webhook.port=8443
+    ```
+
+10. Upgrade Karpenter to the new version. At the end of this step, the conversion webhooks run to convert the Karpenter custom resources to v1.
+
+    ```bash
+    # Service account annotation can be dropped when using pod identity
+    helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+      --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \
+      --set settings.clusterName=${CLUSTER_NAME} \
+      --set settings.interruptionQueue=${CLUSTER_NAME} \
+      --set controller.resources.requests.cpu=1 \
+      --set controller.resources.requests.memory=1Gi \
+      --set controller.resources.limits.cpu=1 \
+      --set controller.resources.limits.memory=1Gi \
+      --wait
+    ```
+
+    {{% alert title="Note" color="warning" %}}
+    Karpenter has deprecated and moved a number of Helm values as part of the v1 release. Ensure that you move to the newer versions of these Helm values during your migration to v1. You can find details for all the settings that were moved in the [v1 Upgrade Reference]({{}}).
+    {{% /alert %}}
+
+11. Once upgraded, you won't need to roll your nodes to be compatible with v1.1.0, except if you have multiple NodePools with different `kubelet` configurations that reference the same EC2NodeClass. Karpenter has moved `kubelet` to the EC2NodeClass in v1. NodePools with different `kubelet` configurations referencing the same EC2NodeClass will be compatible with v1.0.0, but will not be in v1.1.0.
+
+When you have completed the migration to `1.0.0` CRDs, Karpenter will be able to serve both the `v1beta1` versions and the `v1` versions of NodePools, NodeClaims, and EC2NodeClasses.
+The results of upgrading these CRDs include the following:
+
+* The storage version of these resources changes to v1. After the upgrade, Karpenter starts converting these resources to v1 storage versions in real time. Users should experience no differences from this change.
+* You are still able to GET and make updates using the v1beta1 versions, for example by running `kubectl get nodepools.v1beta1.karpenter.sh`.
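+
+To confirm that both API versions are being served and that `v1` is now the storage version, you can inspect the CRDs directly. This is a minimal sketch, assuming the default CRD names:
+
+```bash
+# Served versions and recorded stored versions for the NodePool CRD
+kubectl get crd nodepools.karpenter.sh -o jsonpath='{.spec.versions[*].name}{"\n"}{.status.storedVersions}{"\n"}'
+
+# The same resources remain readable through the v1beta1 API
+kubectl get nodepools.v1beta1.karpenter.sh
+```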
+
+## Post upgrade considerations
+
+Your NodePool and EC2NodeClass objects are auto-converted to the new v1 storage version during the upgrade. Consider getting the latest versions of those objects to update any stored manifests where you were previously applying the v1beta1 version.
+
+* [NodePools]({{}}): Get the latest copy of your NodePool (`kubectl get nodepool default -o yaml > nodepool.yaml`) and review the [Changelog]({{}}) for changes to NodePool objects. Make modifications as needed.
+* [EC2NodeClasses]({{}}): Get the latest copy of your EC2NodeClass (`kubectl get ec2nodeclass default -o yaml > ec2nodeclass.yaml`) and review the [Changelog]({{}}) for changes to EC2NodeClass objects. Make modifications as needed.
+
+When you are satisfied with your NodePool and EC2NodeClass files, apply them as follows:
+
+```bash
+kubectl apply -f nodepool.yaml
+kubectl apply -f ec2nodeclass.yaml
+```
+
+## Changelog
+Refer to the [Full Changelog]({{}}) for more details.
+
+Because Karpenter `v1.0.0` will serve both `v1` and `v1beta1` versions of NodePools and EC2NodeClasses, you don't immediately have to upgrade the stored manifests that you have to v1.
+However, in preparation for later Karpenter upgrades (which will not support `v1beta1`), review the following changes from v1beta1 to v1.
+
+Karpenter `v1.0.0` changes are divided into two categories: changes you must make before upgrading to `1.0.0` and changes you must make before upgrading to `1.1.0`.
+
+### Changes required before upgrading to `v1.0.0`
+
+Apply the following changes to your NodePools and EC2NodeClasses, as appropriate, before upgrading them to v1.
+
+* **Deprecated annotations, labels and tags are removed for v1.0.0**: For v1, `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict` (annotation), and `karpenter.sh/managed-by` (tag) all have support removed.
+The `karpenter.sh/managed-by` tag, which currently stores the cluster name in its value, is replaced by `eks:eks-cluster-name`, to allow
+for [EKS Pod Identity ABAC policies](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-abac.html).
+
+* **Zap logging config removed**: Support for setting the Zap logging config was deprecated in beta and is now removed for v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details.
+
+* **metadataOptions could break workloads**: If you have workload pods that are not using host networking, the updated default `metadataOptions` could cause your containers to break when you apply new EC2NodeClasses on v1.
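+
+  To see whether any of your existing EC2NodeClasses still rely on the old hop limit of 2 before you create new v1 resources, you can inspect the field directly. This is a minimal sketch assuming `kubectl` access; new v1 EC2NodeClasses default this value to 1:
+
+  ```bash
+  # Print the configured IMDS hop limit for every EC2NodeClass
+  kubectl get ec2nodeclasses -o custom-columns='NAME:.metadata.name,HOP_LIMIT:.spec.metadataOptions.httpPutResponseHopLimit'
+  ```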
+
+* **Ubuntu AMIFamily Removed**:
+
+  Support for automatic AMI selection and UserData generation for Ubuntu has been dropped with Karpenter `v1.0.0`.
+  To continue using Ubuntu AMIs you will need to specify an AMI using `amiSelectorTerms`.
+
+  UserData generation can be achieved using the AL2 AMIFamily, which has an identical UserData format.
+  However, compatibility is not guaranteed long-term and changes to either AL2 or Ubuntu's UserData format may introduce incompatibilities.
+  If this occurs, the Custom AMIFamily should be used for Ubuntu and UserData will need to be entirely maintained by the user.
+
+  If you are upgrading to `v1.0.0` and already have v1beta1 Ubuntu EC2NodeClasses, all you need to do is specify `amiSelectorTerms` and Karpenter will translate your NodeClasses to the v1 equivalent (as shown below).
+  Failure to specify `amiSelectorTerms` will result in the EC2NodeClass and all referencing NodePools showing as NotReady, causing Karpenter to ignore these NodePools and EC2NodeClasses for Provisioning and Drift.
+
+  ```yaml
+  # Original v1beta1 EC2NodeClass
+  apiVersion: karpenter.k8s.aws/v1beta1
+  kind: EC2NodeClass
+  spec:
+    amiFamily: Ubuntu
+    amiSelectorTerms:
+      - id: ami-foo
+  ---
+  # Conversion Webhook Output
+  apiVersion: karpenter.k8s.aws/v1
+  kind: EC2NodeClass
+  metadata:
+    annotations:
+      compatibility.karpenter.k8s.aws/v1beta1-ubuntu: amiFamily,blockDeviceMappings
+  spec:
+    amiFamily: AL2
+    amiSelectorTerms:
+      - id: ami-foo
+    blockDeviceMappings:
+      - deviceName: '/dev/sda1'
+        rootVolume: true
+        ebs:
+          encrypted: true
+          volumeType: gp3
+          volumeSize: 20Gi
+  ```
+
+* **amiSelectorTerms and amiFamily**: For v1, `amiFamily` is no longer required if you instead specify an `alias` in `amiSelectorTerms` in your `EC2NodeClass`. You need to update your `amiSelectorTerms` and `amiFamily` if you are using:
+  * A Custom amiFamily. You must ensure that you add the `karpenter.sh/unregistered:NoExecute` taint to the node in your UserData.
+  * An Ubuntu AMI, as described earlier.
+
+### Before upgrading to `v1.1.0`
+
+Apply the following changes to your NodePools and EC2NodeClasses, as appropriate, before upgrading them to `v1.1.0` (though it is okay to make these changes while on `1.0.0`).
+
+* **v1beta1 support gone**: In `v1.1.0`, v1beta1 is not supported. So you need to:
+  * Migrate all Karpenter YAML files ([NodePools]({{}}), [EC2NodeClasses]({{}})) to v1.
+  * Know that all resources in the cluster also need to be on v1. It's possible (although unlikely) that some resources may still be stored as v1beta1 in etcd if no writes have been made to them since the v1 upgrade. You could use a tool such as [kube-storage-version-migrator](https://github.com/kubernetes-sigs/kube-storage-version-migrator) to handle this.
+  * Know that you cannot roll back to v1beta1 once you have upgraded to `v1.1.0`.
+
+* **Kubelet Configuration**: If you have multiple NodePools pointing to the same EC2NodeClass that have different kubeletConfigurations,
+then you have to manually add more EC2NodeClasses and point their NodePools to them. This will induce drift and you will have to roll your cluster.
+If you have multiple NodePools pointing to the same EC2NodeClass, but they have the same configuration, then you can proceed with the migration
+without having drift or having any additional NodePools or EC2NodeClasses configured.
+
+* **Remove kubelet annotation from NodePools**: During the upgrade process, Karpenter will rely on the `compatibility.karpenter.sh/v1beta1-kubelet-conversion` annotation to determine whether to use the v1beta1 NodePool kubelet configuration or the v1 EC2NodeClass kubelet configuration. The `compatibility.karpenter.sh/v1beta1-kubelet-conversion` NodePool annotation takes precedence over the EC2NodeClass kubelet configuration when launching nodes. Remove the kubelet-configuration annotation (`compatibility.karpenter.sh/v1beta1-kubelet-conversion`) from your NodePools once you have migrated kubelet from the NodePool to the EC2NodeClass (see the example below).
+
+Keep in mind that rollback to an earlier version of Karpenter will not be supported without replacing the Karpenter nodes once the annotation is removed. This annotation is only used to support the kubelet configuration migration path, and will not be supported in v1.1.
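+
+Once you have verified that the kubelet settings now live on the EC2NodeClass, the compatibility annotation can be removed from your NodePools. This is a minimal sketch; the NodePool name `default` is only an example:
+
+```bash
+# Show which NodePools still carry the kubelet compatibility annotation
+kubectl get nodepools -o custom-columns='NAME:.metadata.name,KUBELET_CONVERSION:.metadata.annotations.compatibility\.karpenter\.sh/v1beta1-kubelet-conversion'
+
+# Remove the annotation from a NodePool named "default" (the trailing "-" deletes the annotation)
+kubectl annotate nodepool default compatibility.karpenter.sh/v1beta1-kubelet-conversion-
+```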
+
+### Downgrading
+
+Once the Karpenter CRDs are upgraded to v1, conversion webhooks are needed to convert resources that are stored in etcd as v1 back to v1beta1. In addition, the CRDs installed in the cluster must continue to include the latest stored version, in this case v1. The patch versions of the v1beta1 Karpenter controller that include the conversion webhooks are:
+
+* v0.37.1
+* v0.36.3
+* v0.35.6
+* v0.34.7
+* v0.33.6
+
+{{% alert title="Note" color="warning" %}}
+When rolling back from v1, Karpenter will not retain data that was only valid in v1 APIs. For instance, if you were upgrading from v0.33.5 to v1, updated the `NodePool.Spec.Disruption.Budgets` field and then rolled back to v0.33.6, Karpenter would not retain the `NodePool.Spec.Disruption.Budgets` field, as that was introduced in v0.34.x. If you are configuring the kubelet field and have removed the `compatibility.karpenter.sh/v1beta1-kubelet-conversion` annotation, rollback is not supported without replacing your nodes, since the kubelet configuration has already moved from the NodePool to the EC2NodeClass.
+{{% /alert %}}
+
+{{% alert title="Note" color="warning" %}}
+Since both v1beta1 and v1 will be served, `kubectl` will default to returning the `v1` version of your CRDs. To interact with the v1beta1 version of your CRDs, you'll need to add the full resource path (including API version) to your `kubectl` calls. For example: `kubectl get nodeclaim.v1beta1.karpenter.sh`
+{{% /alert %}}
+
+1. Set environment variables
+
+```bash
+export KARPENTER_NAMESPACE="kube-system"
+# Note: v0.33.6 and v0.34.7 include the v prefix, omit it for versions v0.35+
+export KARPENTER_VERSION=""
+export AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov
+export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
+export CLUSTER_NAME=""
+export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"
+export TEMPOUT="$(mktemp)"
+```
+
+{{% alert title="Warning" color="warning" %}}
+If you open a new shell to run steps in this procedure, you need to set some or all of the environment variables again.
+To remind yourself of these values, type:
+
+```bash
+echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${CLUSTER_NAME}" "${TEMPOUT}"
+```
+
+{{% /alert %}}
+
+2. Roll back the Karpenter Policy
+
+**v0.33.6 and v0.34.7:**
+```bash
+curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \
+  && aws cloudformation deploy \
+  --stack-name "Karpenter-${CLUSTER_NAME}" \
+  --template-file "${TEMPOUT}" \
+  --capabilities CAPABILITY_NAMED_IAM \
+  --parameter-overrides "ClusterName=${CLUSTER_NAME}"
+```
+
+**v0.35+:**
+```bash
+curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > ${TEMPOUT} \
+  && aws cloudformation deploy \
+  --stack-name "Karpenter-${CLUSTER_NAME}" \
+  --template-file "${TEMPOUT}" \
+  --capabilities CAPABILITY_NAMED_IAM \
+  --parameter-overrides "ClusterName=${CLUSTER_NAME}"
+```
+
+3. Roll back the CRDs
+
+```bash
+helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+  --set webhook.enabled=true \
+  --set webhook.serviceName=karpenter \
+  --set webhook.serviceNamespace="${KARPENTER_NAMESPACE}" \
+  --set webhook.port=8443
+```
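+
+Before rolling back the controller, you can optionally confirm that the rolled-back CRDs still include the `v1` schema, which is required to keep serving resources that are now stored as v1. A minimal sketch:
+
+```bash
+# Both v1beta1 and v1 should be listed for each Karpenter CRD
+kubectl get crd nodepools.karpenter.sh nodeclaims.karpenter.sh ec2nodeclasses.karpenter.k8s.aws \
+  -o custom-columns='NAME:.metadata.name,VERSIONS:.spec.versions[*].name'
+```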
+
+4. Roll back the Karpenter Controller
+
+```bash
+# Service account annotation can be dropped when using pod identity
+helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
+  --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \
+  --set settings.clusterName=${CLUSTER_NAME} \
+  --set settings.interruptionQueue=${CLUSTER_NAME} \
+  --set controller.resources.requests.cpu=1 \
+  --set controller.resources.requests.memory=1Gi \
+  --set controller.resources.limits.cpu=1 \
+  --set controller.resources.limits.memory=1Gi \
+  --set webhook.enabled=true \
+  --set webhook.port=8443 \
+  --wait
+```
+
+Karpenter should now be pulling and operating against the v1beta1 API version as it was prior to the upgrade.
+
+## Full Changelog
+* Features:
+  * AMI Selector Terms has a new Alias field which can only be set by itself in `EC2NodeClass.Spec.AMISelectorTerms`
+  * Disruption Budgets by Reason was added to `NodePool.Spec.Disruption.Budgets`
+  * TerminationGracePeriod was added to `NodePool.Spec.Template.Spec`.
+  * LOG_OUTPUT_PATHS and LOG_ERROR_OUTPUT_PATHS environment variables were added
+* API Rename: NodePool's ConsolidationPolicy `WhenUnderutilized` is now renamed to `WhenEmptyOrUnderutilized`
+* Behavior Changes:
+  * Expiration is now forceful and begins draining as soon as it's expired. Karpenter does not wait for replacement capacity to be available before draining, but will start provisioning a replacement as soon as the node is expired and begins draining.
+  * Karpenter's generated NodeConfig now takes precedence when generating UserData with the AL2023 `amiFamily`. If you're setting any values managed by Karpenter in your AL2023 UserData, configure these through Karpenter natively (e.g. kubelet configuration fields).
+  * Karpenter now adds a `karpenter.sh/unregistered:NoExecute` taint to nodes in injected UserData when using alias in AMISelectorTerms or non-Custom AMIFamily. When using `amiFamily: Custom`, users will need to add this taint into their UserData, where Karpenter will automatically remove it when provisioning nodes.
+  * Discovered standard AL2023 AMIs will no longer be considered compatible with GPU / accelerator workloads. If you're using an AL2023 EC2NodeClass (without AMISelectorTerms) for these workloads, you will need to select your AMI via AMISelectorTerms (non-alias).
+  * Karpenter now waits for underlying instances to be completely terminated before removing the associated nodes. This means it may take longer for nodes to be deleted and for nodeclaims to get cleaned up.
+* API Moves:
+  * ExpireAfter has moved from the `NodePool.Spec.Disruption` block to `NodePool.Spec.Template.Spec`, and is now a drift-able field.
+  * `Kubelet` was moved to the EC2NodeClass from the NodePool.
+* RBAC changes: added `delete pods` | added `get, patch crds` | added `update nodes` | removed `create nodes`
+* Breaking API (Manual Migration Needed):
+  * Ubuntu is dropped as a first-class supported AMI Family
+  * `karpenter.sh/do-not-consolidate` (annotation), `karpenter.sh/do-not-evict` (annotation), and `karpenter.sh/managed-by` (tag) are all removed. `karpenter.sh/managed-by`, which currently stores the cluster name in its value, will be replaced by `eks:eks-cluster-name`
+  * The taint used to mark nodes for disruption and termination changed from `karpenter.sh/disruption=disrupting:NoSchedule` to `karpenter.sh/disrupted:NoSchedule`. Tolerating this taint is not recommended; however, if you were tolerating it in your applications, you'll need to adjust your tolerations to use the new taint key.
+* Environment Variable Changes:
+  * LOGGING_CONFIG, ASSUME_ROLE_ARN, and ASSUME_ROLE_DURATION were dropped
+  * LEADER_ELECT was renamed to DISABLE_LEADER_ELECTION
+  * `FEATURE_GATES.DRIFT=true` was dropped; drift was promoted to Stable and can no longer be disabled.
+    * Users currently opting out of drift by disabling the drift feature flag will no longer be able to do so.
+* Defaults changed:
+  * API: Karpenter will drop support for IMDS access from containers by default on new EC2NodeClasses by updating the default of `httpPutResponseHopLimit` from 2 to 1.
+  * API: ConsolidateAfter is now required. Users could not set this field before with `ConsolidationPolicy: WhenUnderutilized`; it can be set to 0 to keep the same behavior as in v1beta1.
+  * API: All `NodeClassRef` fields are now required, and apiVersion has been renamed to group
+  * API: AMISelectorTerms are required. Setting an Alias cannot be done with any other type of term, and must match the AMI Family that's set or be Custom.
+  * Helm: The Deployment's TopologySpreadConstraint changed from a preferred (`ScheduleAnyway`) zonal spread to a required (`DoNotSchedule`) one. Users who had one node running their Karpenter deployments need to do one of the following:
+    * Have two nodes in different zones to ensure both Karpenter replicas schedule
+    * Scale down their Karpenter replicas from 2 to 1 in the helm chart
+    * Edit and relax the topology spread constraint in their helm chart from DoNotSchedule to ScheduleAnyway
+  * Helm/Binary: `controller.METRICS_PORT` default changed back to 8080
+
+### Updated metrics
+
+Changes to Karpenter metrics from v1beta1 to v1 are shown in the following tables.
+ +This table shows metrics names that changed from v1beta1 to v1: + +| Metric type | v1beta1 metrics name | new v1 metrics name | +|--|--|--| +| Node | karpenter_nodes_termination_time_seconds | karpenter_nodes_termination_duration_seconds | +| Node | karpenter_nodes_terminated | karpenter_nodes_terminated_total | +| Node | karpenter_nodes_leases_deleted | karpenter_nodes_leases_deleted_total | +| Node | karpenter_nodes_created | karpenter_nodes_created_total | +| Pod | karpenter_pods_startup_time_seconds | karpenter_pods_startup_duration_seconds | +| Disruption | karpenter_disruption_replacement_nodeclaim_failures_total | karpenter_voluntary_disruption_queue_failures_total | +| Disruption | karpenter_disruption_evaluation_duration_seconds | karpenter_voluntary_disruption_decision_evaluation_duration_seconds | +| Disruption | karpenter_disruption_eligible_nodes | karpenter_voluntary_disruption_eligible_nodes | +| Disruption | karpenter_disruption_consolidation_timeouts_total | karpenter_voluntary_disruption_consolidation_timeouts_total | +| Disruption | karpenter_disruption_budgets_allowed_disruptions | karpenter_nodepools_allowed_disruptions | +| Disruption | karpenter_disruption_actions_performed_total | karpenter_voluntary_disruption_decisions_total | +| Provisioner | karpenter_provisioner_scheduling_simulation_duration_seconds | karpenter_scheduler_scheduling_duration_seconds | +| Provisioner | karpenter_provisioner_scheduling_queue_depth | karpenter_scheduler_queue_depth | +| Interruption | karpenter_interruption_received_messages | karpenter_interruption_received_messages_total | +| Interruption | karpenter_interruption_deleted_messages | karpenter_interruption_deleted_messages_total | +| Interruption | karpenter_interruption_message_latency_time_seconds | karpenter_interruption_message_queue_duration_seconds | +| NodePool | karpenter_nodepool_usage | karpenter_nodepools_usage | +| NodePool | karpenter_nodepool_limit | karpenter_nodepools_limit | +| NodeClaim | karpenter_nodeclaims_terminated | karpenter_nodeclaims_terminated_total | +| NodeClaim | karpenter_nodeclaims_disrupted | karpenter_nodeclaims_disrupted_total | +| NodeClaim | karpenter_nodeclaims_created | karpenter_nodeclaims_created_total | + +This table shows v1beta1 metrics that were dropped for v1: + +| Metric type | Metric dropped for v1 | +|--|--| +| Disruption | karpenter_disruption_replacement_nodeclaim_initialized_seconds | +| Disruption | karpenter_disruption_queue_depth | +| Disruption | karpenter_disruption_pods_disrupted_total | +| | karpenter_consistency_errors | +| NodeClaim | karpenter_nodeclaims_registered | +| NodeClaim | karpenter_nodeclaims_launched | +| NodeClaim | karpenter_nodeclaims_initialized | +| NodeClaim | karpenter_nodeclaims_drifted | +| Provisioner | karpenter_provisioner_scheduling_duration_seconds | +| Interruption | karpenter_interruption_actions_performed | + +{{% alert title="Note" color="warning" %}} +Karpenter now waits for the underlying instance to be completely terminated before deleting a node and orchestrates this by emitting `NodeClaimNotFoundError`. With this change we expect to see an increase in the `NodeClaimNotFoundError`. Customers can filter out this error by label in order to get accurate values for `karpenter_cloudprovider_errors_total` metric. Use this Prometheus filter expression - `({controller!="node.termination"} or {controller!="nodeclaim.termination"}) and {error!="NodeClaimNotFoundError"}`. 
+{{% /alert %}} diff --git a/website/hugo.yaml b/website/hugo.yaml index 80c2b0830b97..6384f15012e7 100644 --- a/website/hugo.yaml +++ b/website/hugo.yaml @@ -76,12 +76,12 @@ params: url: "https://slack.k8s.io/" icon: fab fa-slack desc: "Chat with us on Slack in the #aws-provider channel" - latest_release_version: "0.37.0" + latest_release_version: "1.0.0" latest_k8s_version: "1.30" versions: + - v1.0 - v0.37 - v0.36 - - v0.35 - v0.32 - preview menu: