diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster16.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster16.yaml
index ac15510f6..d0c96e76e 100644
--- a/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster16.yaml
+++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster16.yaml
@@ -52,7 +52,7 @@ spec:
           key: AWS_SECRET_ACCESS_KEY
   bootstrap:
     recovery:
-      source: &previousCluster postgres16-v6
+      source: &previousCluster main-postgres16-v1
   externalClusters:
     - name: *previousCluster
       barmanObjectStore:
diff --git a/kubernetes/main/apps/database/kustomization.yaml b/kubernetes/main/apps/database/kustomization.yaml
index 6ebe2b6eb..6a60f9b1a 100644
--- a/kubernetes/main/apps/database/kustomization.yaml
+++ b/kubernetes/main/apps/database/kustomization.yaml
@@ -4,6 +4,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
   - ./namespace.yaml
-  - ./cloudnative-pg/ks.yaml
+  # - ./cloudnative-pg/ks.yaml
   - ./dragonfly/ks.yaml
   - ./pgadmin/ks.yaml
diff --git a/kubernetes/main/apps/default/filebrowser/app/helmrelease.yaml b/kubernetes/main/apps/default/filebrowser/app/helmrelease.yaml
index a4ac11aa2..73395e203 100644
--- a/kubernetes/main/apps/default/filebrowser/app/helmrelease.yaml
+++ b/kubernetes/main/apps/default/filebrowser/app/helmrelease.yaml
@@ -57,7 +57,7 @@ spec:
               capabilities: { drop: ["ALL"] }
             resources:
               requests:
-                cpu: 100m
+                cpu: 50m
               limits:
                 memory: 2Gi
     defaultPodOptions:
diff --git a/kubernetes/main/apps/default/livebook/app/helmrelease.yaml b/kubernetes/main/apps/default/livebook/app/helmrelease.yaml
index 828933256..444241a5a 100644
--- a/kubernetes/main/apps/default/livebook/app/helmrelease.yaml
+++ b/kubernetes/main/apps/default/livebook/app/helmrelease.yaml
@@ -71,7 +71,7 @@ spec:
               requests:
                 cpu: 10m
               limits:
-                cpu: 1000m
+                cpu: 500m
                 memory: 1Gi
     defaultPodOptions:
       securityContext:
diff --git a/kubernetes/main/apps/rook-ceph/rook-ceph/app/helmrelease.yaml b/kubernetes/main/apps/rook-ceph/rook-ceph/app/helmrelease.yaml
index e7fbaef9b..9754e5343 100644
--- a/kubernetes/main/apps/rook-ceph/rook-ceph/app/helmrelease.yaml
+++ b/kubernetes/main/apps/rook-ceph/rook-ceph/app/helmrelease.yaml
@@ -37,6 +37,6 @@ spec:
       enabled: true
       resources:
         requests:
+          cpu: 50m # unchangable
           memory: 128Mi # unchangable
-          cpu: 100m # unchangable
         limits: {}
diff --git a/kubernetes/main/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml b/kubernetes/main/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml
index 3c81695e7..e160fe333 100644
--- a/kubernetes/main/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml
+++ b/kubernetes/main/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml
@@ -73,7 +73,7 @@ spec:
       useAllDevices: false
       deviceFilter: sda
       config:
-        osdsPerDevice: "2"
+        osdsPerDevice: "1"
     placement:
       mgr: &placement
         nodeAffinity:
@@ -86,7 +86,7 @@ spec:
     resources:
       mgr:
         requests:
-          cpu: 100m
+          cpu: 50m
           memory: 512Mi
         limits:
           memory: 2Gi
@@ -98,13 +98,13 @@ spec:
           memory: 1Gi
       osd:
         requests:
-          cpu: 500m
-          memory: 2Gi
+          cpu: 150m
+          memory: 1Gi
         limits:
-          memory: 6Gi
+          memory: 4Gi
       mgr-sidecar:
         requests:
-          cpu: 50m
+          cpu: 20m
           memory: 128Mi
         limits:
           memory: 256Mi
@@ -162,10 +162,10 @@ spec:
             app.kubernetes.io/part-of: *cephFileSystemName
         resources:
           requests:
-            cpu: 100m
+            cpu: 50m
            memory: 1Gi
          limits:
-            memory: 4Gi
+            memory: 2Gi
       storageClass:
         enabled: true
         isDefault: false
@@ -205,7 +205,7 @@ spec:
           port: 80
       resources:
         requests:
-          cpu: 100m
+          cpu: 80m
           memory: 1Gi
         limits:
           memory: 2Gi
@@ -220,7 +220,7 @@ spec:
       reclaimPolicy: Delete
       volumeBindingMode: Immediate
       parameters:
-        region: us-east-1
+        region: us-west-1
       ingress:
         enabled: true
         ingressClassName: internal
diff --git a/kubernetes/main/flux/config/flux.yaml b/kubernetes/main/flux/config/flux.yaml
index 973cbfe50..ba872326a 100644
--- a/kubernetes/main/flux/config/flux.yaml
+++ b/kubernetes/main/flux/config/flux.yaml
@@ -64,7 +64,7 @@ spec:
         - name: manager
           resources:
             limits:
-              cpu: 2000m
+              cpu: 1000m
               memory: 2Gi
     target:
       kind: Deployment
diff --git a/scripts/wipe-disk.sh b/scripts/wipe-disk.sh
index fbb32b190..ec8b5c420 100644
--- a/scripts/wipe-disk.sh
+++ b/scripts/wipe-disk.sh
@@ -5,10 +5,23 @@
 # Run this in the rook ceph tools pod container on each node:
 
-DISK="/dev/sda"; \
+# One liner for the rook ceph tools pod container:
+FDISK_BEFORE=$(fdisk -l); \
+  DISK="/dev/sda"; \
 sgdisk --zap-all $DISK; \
 dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync; \
 blkdiscard $DISK; \
-partprobe $DISK
+partprobe $DISK
+
+# You may also need to remove LVM and device mapper data, then compare
+# the partition tables from before and after the wipe:
+ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %; \
+  rm -rf /dev/ceph-*; \
+  rm -rf /dev/mapper/ceph--*; \
+  diff <(echo "$FDISK_BEFORE") <(fdisk -l)
 
 # Then, delete the operator pod to re-run the osd-prepare job pods
 
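+#
+# For example, a minimal sketch (assuming the default rook-ceph namespace and
+# the operator's standard app=rook-ceph-operator label):
+# kubectl -n rook-ceph delete pod -l app=rook-ceph-operator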