From 53f4c7ff440836688cc9f20ba4c5f709eabc4290 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:04:47 +0330
Subject: [PATCH 01/11] feat: change github action

---
 .github/workflows/go.yaml    | 4 ++++
 docker/AuthServiceDockerfile | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index dbb655071..876c5e4eb 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -132,6 +132,10 @@ jobs:
             chmod +x ./build/*
           fi
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done
+      - name: Run LS
+        run: |
+          ls -la
+
       - name: Pack build
         if: github.event_name != 'pull_request'
         run: |
diff --git a/docker/AuthServiceDockerfile b/docker/AuthServiceDockerfile
index 094124795..ab8682b3d 100644
--- a/docker/AuthServiceDockerfile
+++ b/docker/AuthServiceDockerfile
@@ -4,4 +4,4 @@ RUN apk --no-cache add ca-certificates
 FROM scratch
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY ./build/auth-service /
-CMD [ "/auth-service" ]
\ No newline at end of file
+CMD [ "/auth-service" ]

From 83cb6bbf1a98be25988c692c8655e14f6abebda9 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:08:51 +0330
Subject: [PATCH 02/11] feat: change github action

---
 .github/workflows/go.yaml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 876c5e4eb..483f6f554 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -134,8 +134,9 @@ jobs:
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done
       - name: Run LS
         run: |
-          ls -la
-
+          ls build/* -la
+          ls build_services/* -la
+
       - name: Pack build
         if: github.event_name != 'pull_request'
         run: |

From f9dd7bd5602dc1cb5e3f4ac5a65c19fb6bed3f53 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:10:34 +0330
Subject: [PATCH 03/11] feat: change github action

---
 .github/workflows/go.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 483f6f554..01942310a 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -134,8 +134,8 @@ jobs:
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done
       - name: Run LS
         run: |
-          ls build/* -la
-          ls build_services/* -la
+          ls build/ -la
+          ls build_services/ -la
 
       - name: Pack build
         if: github.event_name != 'pull_request'

From 2cd8ad14c8ef29fc1fd48dbc45684dce48942f27 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:14:22 +0330
Subject: [PATCH 04/11] feat: change github action

---
 .github/workflows/go.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 01942310a..6236cbe68 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -134,8 +134,7 @@ jobs:
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done
       - name: Run LS
         run: |
-          ls build/ -la
-          ls build_services/ -la
+          ls -la
       - name: Pack build
         if: github.event_name != 'pull_request'
         run: |

From 497180ba8ca9d4fbf1de1ffc09e666faa6d9e8d6 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:16:35 +0330
Subject: [PATCH 05/11] feat: change github action

---
 .github/workflows/go.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 6236cbe68..6f5ada8f8 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -136,6 +136,8 @@ jobs:
         run: |
           ls -la
+          ls build -la
+
 
       - name: Pack build
         if: github.event_name != 'pull_request'
         run: |

From d8dc9204b2e6b7de4e333747df5de181ac873400 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:21:58 +0330
Subject: [PATCH 06/11] feat: change auth docker file

---
 docker/AuthServiceDockerfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docker/AuthServiceDockerfile b/docker/AuthServiceDockerfile
index ab8682b3d..5d5c58e4f 100644
--- a/docker/AuthServiceDockerfile
+++ b/docker/AuthServiceDockerfile
@@ -1,4 +1,5 @@
-FROM docker.io/golang:alpine as build
+FROM golang:1.16-buster as build
+
 RUN apk --no-cache add ca-certificates
 
 FROM scratch

From 8cf9d281375106efdfe2ba998e35adaff2b2344a Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:41:04 +0330
Subject: [PATCH 07/11] feat: change auth docker file

---
 .github/workflows/go.yaml    | 8 +-------
 docker/AuthServiceDockerfile | 5 ++---
 2 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 6f5ada8f8..c0e32e696 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -98,7 +98,7 @@ jobs:
           fetch-depth: 5
 
       - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "./go.mod"
           cache: false
@@ -132,12 +132,6 @@ jobs:
             chmod +x ./build/*
           fi
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done
-      - name: Run LS
-        run: |
-          ls -la
-
-          ls build -la
-
       - name: Pack build
         if: github.event_name != 'pull_request'
         run: |
diff --git a/docker/AuthServiceDockerfile b/docker/AuthServiceDockerfile
index 5d5c58e4f..094124795 100644
--- a/docker/AuthServiceDockerfile
+++ b/docker/AuthServiceDockerfile
@@ -1,8 +1,7 @@
-FROM golang:1.16-buster as build
-
+FROM docker.io/golang:alpine as build
 RUN apk --no-cache add ca-certificates
 
 FROM scratch
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY ./build/auth-service /
-CMD [ "/auth-service" ]
+CMD [ "/auth-service" ]
\ No newline at end of file

From 13cfda65aa6eccdfecc48c21e4ec1cd640fb07f2 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:44:14 +0330
Subject: [PATCH 08/11] feat: change auth docker file

---
 .github/workflows/go.yaml | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index c0e32e696..040debae6 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -87,11 +87,6 @@ jobs:
       GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
 
     steps:
-      - name: Install musl cc
-        uses: awalsh128/cache-apt-pkgs-action@v1
-        with:
-          packages: musl-tools musl-dev musl
-
       - name: Checkout code
         uses: actions/checkout@v3
         with:
@@ -127,7 +122,7 @@ jobs:
           mkdir -p ./build
           if [ ! -z "$(cat ./build_services)" ]; then
             for f in $(cat ./build_services); do
-              CC=/usr/bin/musl-gcc GOPRIVATE="github.com/opengovern" GOOS=linux GOARCH=amd64 go build -v -ldflags "-linkmode external -extldflags '-static' -s -w" -tags musl -o ./build/ ./cmd/$f;
+              GOPRIVATE="github.com/opengovern" go build -v -ldflags "-linkmode external -extldflags '-static' -s -w" -tags musl -o ./build/ ./cmd/$f;
             done
             chmod +x ./build/*
           fi
           for f in $(cat ./service-list); do echo "$f=true" >> "$GITHUB_OUTPUT"; done

From 8fbaf36b5b3314a879c4fbd7be860e4bf8830ec0 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 22:58:51 +0330
Subject: [PATCH 09/11] feat: revert action

---
 .github/workflows/go.yaml | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index 040debae6..c0e32e696 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -87,6 +87,11 @@ jobs:
       GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
 
     steps:
+      - name: Install musl cc
+        uses: awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: musl-tools musl-dev musl
+
       - name: Checkout code
         uses: actions/checkout@v3
         with:
@@ -122,7 +127,7 @@ jobs:
           mkdir -p ./build
           if [ ! -z "$(cat ./build_services)" ]; then
             for f in $(cat ./build_services); do
-              GOPRIVATE="github.com/opengovern" go build -v -ldflags "-linkmode external -extldflags '-static' -s -w" -tags musl -o ./build/ ./cmd/$f;
+              CC=/usr/bin/musl-gcc GOPRIVATE="github.com/opengovern" GOOS=linux GOARCH=amd64 go build -v -ldflags "-linkmode external -extldflags '-static' -s -w" -tags musl -o ./build/ ./cmd/$f;
             done
             chmod +x ./build/*
           fi

From 47569a505c2312fdd7bdbe913f965324a99777e7 Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 23:46:56 +0330
Subject: [PATCH 10/11] feat: remove wastage service

---
 .github/workflows/go.yaml                        |   36 -
 build_services                                   |    4 +-
 cmd/wastage-service/main.go                      |   35 -
 docker/AssistantServiceDockerfile                |    8 -
 docker/CICDDockerfile                            |    3 -
 docker/GPTServiceDockerfile                      |    5 -
 docker/OnboardServiceDockerfile                  |    7 -
 docker/WastageServiceDockerfile                  |    7 -
 scripts/export_data.sh                           |    2 +-
 scripts/import_psql_data.sh                      |    2 +-
 services/wastage/api/api.go                      |   52 -
 services/wastage/api/entity/Organization.go      |   19 -
 services/wastage/api/entity/User.go              |   19 -
 services/wastage/api/entity/aws_ec2_instance.go  |  113 -
 services/wastage/api/entity/aws_rds.go           |  104 -
 services/wastage/api/entity/azure_vm.go          |    8 -
 services/wastage/api/entity/configuration.go     |    7 -
 services/wastage/api/entity/gcp_compute.go       |   88 -
 services/wastage/api/entity/kubernetes_pod.go    |   55 -
 services/wastage/api/wastage/api.go              | 1064 --------
 .../api/wastage/grpc-server/aws_plugin.go        |  615 -----
 .../api/wastage/grpc-server/gcp_plugin.go        |  190 --
 services/wastage/api/wastage/grpc-server/grpc.go |   91 -
 .../wastage/grpc-server/kubernetes_plugin.go     |  873 -------
 services/wastage/api/wastage/limit/consts.go     |   15 -
 services/wastage/api/wastage/limit/limit.go      |  177 --
 services/wastage/cmd.go                          |  133 -
 services/wastage/config/service.go               |   18 -
 services/wastage/cost/aws_rds.go                 |  262 --
 services/wastage/cost/azurerm_compute.go         |   63 -
 services/wastage/cost/ec2_instance.go            |  215 --
 services/wastage/cost/gcp_compute.go             |  128 -
 services/wastage/cost/svc.go                     |   11 -
 services/wastage/db/connector/service.go         |   41 -
 services/wastage/db/model/credential.go          |   12 -
 services/wastage/db/model/data_age.go            |    8 -
 services/wastage/db/model/ebs_volume_types.go    |  108 -
 services/wastage/db/model/ec2_instance_types.go  |  377 ---
 .../db/model/gcp_compute_machinetype.go          |   40 -
 services/wastage/db/model/gcp_compute_sku.go     |  118 -
 .../db/model/gcp_compute_storagetype.go          |   43 -
 services/wastage/db/model/organization.go        |   15 -
 services/wastage/db/model/preprocess_job.go      |   21 -
 services/wastage/db/model/rds_db_instance.go     |  187 --
 services/wastage/db/model/rds_db_storage.go      |  299 ---
 services/wastage/db/model/rds_products.go        |  151 --
 services/wastage/db/model/uasge_v2.go            |   61 -
 services/wastage/db/model/usage.go               |   16 -
 services/wastage/db/model/user.go                |   15 -
 services/wastage/db/repo/credential.go           |   63 -
 services/wastage/db/repo/data_age.go             |   59 -
 services/wastage/db/repo/ebs_volume_types.go     |  478 ----
 services/wastage/db/repo/ec2_instance_types.go   |  255 --
 .../db/repo/gcp_compute_machinetype.go           |  182 --
 services/wastage/db/repo/gcp_compute_sku.go      |  199 --
 .../db/repo/gcp_compute_storagetype.go           |  179 --
 services/wastage/db/repo/organization.go         |   60 -
 services/wastage/db/repo/preprocess_job.go       |   63 -
 services/wastage/db/repo/rds_db_instance.go      |  254 --
 services/wastage/db/repo/rds_db_storage.go       |  605 -----
 services/wastage/db/repo/rds_product.go          |  188 --
 services/wastage/db/repo/usage.go                |   54 -
 services/wastage/db/repo/usage_v2.go             |  306 ---
 services/wastage/db/repo/user.go                 |   60 -
 services/wastage/ingestion/gcp.go                |  411 ---
 services/wastage/ingestion/service.go            |  546 ----
 .../recommendation/aws_ec2_instance.go           |  943 -------
 services/wastage/recommendation/aws_rds.go       |  864 -------
 .../recommendation/gcp_compute_instance.go       |  911 -------
 .../wastage/recommendation/gcp_limitations.go    | 2232 -----------------
 .../recommendation/kubernetes_objects.go         |  671 -----
 .../recommendation/kubernetes_objects_test.go    |   72 -
 .../preferences/aws_rds/preferences.go           |   25 -
 .../preferences/ec2instance/preferences.go       |   53 -
 .../preferences/gcp_compute/preferences.go       |   20 -
 services/wastage/recommendation/service.go       |   37 -
 .../wastage/recommendation/service_test.go       |  171 --
 services/wastage/recommendation/utils.go         |  393 ---
 78 files changed, 3 insertions(+), 16292 deletions(-)
 delete mode 100644 cmd/wastage-service/main.go
 delete mode 100644 docker/AssistantServiceDockerfile
 delete mode 100644 docker/CICDDockerfile
 delete mode 100644 docker/GPTServiceDockerfile
 delete mode 100644 docker/OnboardServiceDockerfile
 delete mode 100644 docker/WastageServiceDockerfile
 delete mode 100644 services/wastage/api/api.go
 delete mode 100644 services/wastage/api/entity/Organization.go
 delete mode 100644 services/wastage/api/entity/User.go
 delete mode 100644 services/wastage/api/entity/aws_ec2_instance.go
 delete mode 100644 services/wastage/api/entity/aws_rds.go
 delete mode 100644 services/wastage/api/entity/azure_vm.go
 delete mode 100644 services/wastage/api/entity/configuration.go
 delete mode 100644 services/wastage/api/entity/gcp_compute.go
 delete mode 100644 services/wastage/api/entity/kubernetes_pod.go
 delete mode 100644 services/wastage/api/wastage/api.go
 delete mode 100644 services/wastage/api/wastage/grpc-server/aws_plugin.go
 delete mode 100644 services/wastage/api/wastage/grpc-server/gcp_plugin.go
 delete mode 100644 services/wastage/api/wastage/grpc-server/grpc.go
 delete mode 100644 services/wastage/api/wastage/grpc-server/kubernetes_plugin.go
 delete mode 100644 services/wastage/api/wastage/limit/consts.go
 delete mode 100644 services/wastage/api/wastage/limit/limit.go
 delete mode 100644 services/wastage/cmd.go
 delete mode 100644 services/wastage/config/service.go
 delete mode 100644 services/wastage/cost/aws_rds.go
 delete mode 100644 services/wastage/cost/azurerm_compute.go
 delete mode 100644 services/wastage/cost/ec2_instance.go
 delete mode 100644 services/wastage/cost/gcp_compute.go
 delete mode 100644 services/wastage/cost/svc.go
 delete mode 100644 services/wastage/db/connector/service.go
 delete mode 100644 services/wastage/db/model/credential.go
 delete mode 100644 services/wastage/db/model/data_age.go
 delete mode 100644 services/wastage/db/model/ebs_volume_types.go
 delete mode 100644 services/wastage/db/model/ec2_instance_types.go
 delete mode 100644 services/wastage/db/model/gcp_compute_machinetype.go
 delete mode 100644 services/wastage/db/model/gcp_compute_sku.go
 delete mode 100644 services/wastage/db/model/gcp_compute_storagetype.go
 delete mode 100644 services/wastage/db/model/organization.go
 delete mode 100644 services/wastage/db/model/preprocess_job.go
 delete mode 100644 services/wastage/db/model/rds_db_instance.go
 delete mode 100644 services/wastage/db/model/rds_db_storage.go
 delete mode 100644 services/wastage/db/model/rds_products.go
 delete mode 100644 services/wastage/db/model/uasge_v2.go
 delete mode 100644 services/wastage/db/model/usage.go
 delete mode 100644 services/wastage/db/model/user.go
 delete mode 100644 services/wastage/db/repo/credential.go
 delete mode 100644 services/wastage/db/repo/data_age.go
 delete mode 100644 services/wastage/db/repo/ebs_volume_types.go
 delete mode 100644 services/wastage/db/repo/ec2_instance_types.go
 delete mode 100644 services/wastage/db/repo/gcp_compute_machinetype.go
 delete mode 100644 services/wastage/db/repo/gcp_compute_sku.go
 delete mode 100644 services/wastage/db/repo/gcp_compute_storagetype.go
 delete mode 100644 services/wastage/db/repo/organization.go
 delete mode 100644 services/wastage/db/repo/preprocess_job.go
 delete mode 100644 services/wastage/db/repo/rds_db_instance.go
 delete mode 100644 services/wastage/db/repo/rds_db_storage.go
 delete mode 100644 services/wastage/db/repo/rds_product.go
 delete mode 100644 services/wastage/db/repo/usage.go
 delete mode 100644 services/wastage/db/repo/usage_v2.go
 delete mode 100644 services/wastage/db/repo/user.go
 delete mode 100644 services/wastage/ingestion/gcp.go
 delete mode 100644 services/wastage/ingestion/service.go
 delete mode 100644 services/wastage/recommendation/aws_ec2_instance.go
 delete mode 100644 services/wastage/recommendation/aws_rds.go
 delete mode 100644 services/wastage/recommendation/gcp_compute_instance.go
 delete mode 100644 services/wastage/recommendation/gcp_limitations.go
 delete mode 100644 services/wastage/recommendation/kubernetes_objects.go
 delete mode 100644 services/wastage/recommendation/kubernetes_objects_test.go
 delete mode 100644 services/wastage/recommendation/preferences/aws_rds/preferences.go
 delete mode 100644 services/wastage/recommendation/preferences/ec2instance/preferences.go
 delete mode 100644 services/wastage/recommendation/preferences/gcp_compute/preferences.go
 delete mode 100644 services/wastage/recommendation/service.go
 delete mode 100644 services/wastage/recommendation/service_test.go
 delete mode 100644 services/wastage/recommendation/utils.go

diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index c0e32e696..2718c3b00 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -77,7 +77,6 @@ jobs:
       steampipe-plugin-opengovernance: ${{ steps.build_services.outputs.steampipe-plugin-opengovernance }}
       integration-service: ${{ steps.build_services.outputs.integration-service }}
       es-sink-service: ${{ steps.build_services.outputs.es-sink-service }}
-      wastage-service: ${{ steps.build_services.outputs.wastage-service }}
       information-service: ${{ steps.build_services.outputs.information-service }}
       query-runner-worker: ${{ steps.build_services.outputs.query-runner-worker }}
       demo-importer-worker: ${{ steps.build_services.outputs.demo-importer-worker }}
@@ -710,41 +709,6 @@ jobs:
         file: docker/SteampipePluginOpengovernanceDockerfile
         context: .
 
-  deploy-wastage-service:
-    runs-on: ubuntu-latest
-    needs:
-      - build
-      - tag
-    permissions:
-      id-token: write
-      contents: read
-    environment: docker
-    if: needs.build.outputs.wastage-service == 'true' && github.event_name != 'pull_request'
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Download artifact
-        uses: actions/download-artifact@v3
-        with:
-          name: build
-          path: .
-      - name: Unpack artifact
-        run: |
-          tar -xvf build.tar.gz
-      - name: Log in to the Container registry
-        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GHCR_PAT }}
-      - name: Build and push Docker images
-        uses: docker/build-push-action@v4
-        with:
-          push: true
-          tags: |
-            ghcr.io/${{ github.repository_owner }}/wastage-service:${{ needs.tag.outputs.latest_tag }}
-          file: docker/WastageServiceDockerfile
-          context: .
 
   deploy-information-service:
     runs-on: ubuntu-latest
diff --git a/build_services b/build_services
index 9ef05695a..56d540834 100644
--- a/build_services
+++ b/build_services
@@ -9,13 +9,11 @@ hopper
 inventory-service
 metadata-service
 migrator-worker
-onboard-service
+integration-service
 reporter
 summarizer-worker
 swagger-ui
-gpt-service
 analytics-worker
 cost-estimator-service
-wastage-service
 query-runner-worker
 demo-importer-worker
\ No newline at end of file
diff --git a/cmd/wastage-service/main.go b/cmd/wastage-service/main.go
deleted file mode 100644
index 28c9c4606..000000000
--- a/cmd/wastage-service/main.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"github.com/opengovern/opengovernance/services/wastage"
-	"os"
-	"os/signal"
-	"syscall"
-)
-
-func main() {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
-
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-	defer func() {
-		signal.Stop(c)
-		cancel()
-	}()
-
-	go func() {
-		select {
-		case <-c:
-			cancel()
-		case <-ctx.Done():
-		}
-	}()
-
-	if err := wastage.Command().ExecuteContext(ctx); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
diff --git a/docker/AssistantServiceDockerfile b/docker/AssistantServiceDockerfile
deleted file mode 100644
index c5d695a83..000000000
--- a/docker/AssistantServiceDockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-# syntax=docker/dockerfile:1
-FROM docker.io/golang:alpine as build
-RUN apk --no-cache add ca-certificates
-
-FROM scratch
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY ./build/assistant-service /
-CMD [ "/assistant-service" ]
\ No newline at end of file
diff --git a/docker/CICDDockerfile b/docker/CICDDockerfile
deleted file mode 100644
index 2e5615330..000000000
--- a/docker/CICDDockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM docker.io/golang:alpine
-RUN apk update && apk upgrade
-RUN apk --no-cache add make git bash musl-dev gcc build-base util-linux-dev
diff --git a/docker/GPTServiceDockerfile b/docker/GPTServiceDockerfile
deleted file mode 100644
index 7755df6d5..000000000
--- a/docker/GPTServiceDockerfile
+++ /dev/null
@@ -1,5 +0,0 @@
-ARG PLUGIN_REGISTRY
-FROM ${PLUGIN_REGISTRY}/kaytu-ai:v0.0.2
-
-COPY ./build/gpt-service /
-CMD [ "/gpt-service" ]
\ No newline at end of file
diff --git a/docker/OnboardServiceDockerfile b/docker/OnboardServiceDockerfile
deleted file mode 100644
index 860863cac..000000000
--- a/docker/OnboardServiceDockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM docker.io/golang:alpine as build
-RUN apk --no-cache add ca-certificates
-
-FROM scratch
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY ./build/onboard-service /
-CMD [ "/onboard-service" ]
\ No newline at end of file
diff --git a/docker/WastageServiceDockerfile b/docker/WastageServiceDockerfile
deleted file mode 100644
index b10564d76..000000000
--- a/docker/WastageServiceDockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM docker.io/golang:alpine as build
-RUN apk --no-cache add ca-certificates
-
-FROM docker.io/golang:alpine
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY ./build/wastage-service /
-CMD [ "/wastage-service" ]
\ No newline at end of file
diff --git a/scripts/export_data.sh b/scripts/export_data.sh
index 71739679c..13b630986 100644
--- a/scripts/export_data.sh
+++ b/scripts/export_data.sh
@@ -25,7 +25,7 @@ done
 
 mkdir -p /tmp/demo-data/postgres
 pg_dump --dbname="postgresql://$POSTGRESQL_USERNAME:$POSTGRESQL_PASSWORD@$POSTGRESQL_HOST:$POSTGRESQL_PORT/describe" > /tmp/demo-data/postgres/describe.sql
-pg_dump --dbname="postgresql://$POSTGRESQL_USERNAME:$POSTGRESQL_PASSWORD@$POSTGRESQL_HOST:$POSTGRESQL_PORT/onboard" > /tmp/demo-data/postgres/onboard.sql
+pg_dump --dbname="postgresql://$POSTGRESQL_USERNAME:$POSTGRESQL_PASSWORD@$POSTGRESQL_HOST:$POSTGRESQL_PORT/integration" > /tmp/demo-data/postgres/integration.sql
 pg_dump --dbname="postgresql://$POSTGRESQL_USERNAME:$POSTGRESQL_PASSWORD@$POSTGRESQL_HOST:$POSTGRESQL_PORT/metadata" > /tmp/demo-data/postgres/metadata.sql
 
 cd /tmp
diff --git a/scripts/import_psql_data.sh b/scripts/import_psql_data.sh
index bb60d1473..025306882 100644
--- a/scripts/import_psql_data.sh
+++ b/scripts/import_psql_data.sh
@@ -11,7 +11,7 @@ echo "$POSTGRESQL_USERNAME"
 echo "$POSTGRESQL_PASSWORD"
 
 PGPASSWORD="$POSTGRESQL_PASSWORD" psql --host="$POSTGRESQL_HOST" --port="$POSTGRESQL_PORT" --username "$POSTGRESQL_USERNAME" --dbname "describe" < /demo-data/postgres/describe.sql
-PGPASSWORD="$POSTGRESQL_PASSWORD" psql --host="$POSTGRESQL_HOST" --port="$POSTGRESQL_PORT" --username "$POSTGRESQL_USERNAME" --dbname "onboard" < /demo-data/postgres/onboard.sql
+PGPASSWORD="$POSTGRESQL_PASSWORD" psql --host="$POSTGRESQL_HOST" --port="$POSTGRESQL_PORT" --username "$POSTGRESQL_USERNAME" --dbname "integration" < /demo-data/postgres/integration.sql
 PGPASSWORD="$POSTGRESQL_PASSWORD" psql --host="$POSTGRESQL_HOST" --port="$POSTGRESQL_PORT" --username "$POSTGRESQL_USERNAME" --dbname "metadata" < /demo-data/postgres/metadata.sql
 
 PGPASSWORD="$POSTGRESQL_PASSWORD" psql --host="$POSTGRESQL_HOST" --port="$POSTGRESQL_PORT" --username "$POSTGRESQL_USERNAME" --dbname "onboard" -c "DELETE FROM credentials;"
diff --git a/services/wastage/api/api.go b/services/wastage/api/api.go
deleted file mode 100644
index 1b07de4ac..000000000
--- a/services/wastage/api/api.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package api
-
-import (
-	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
-	"github.com/alitto/pond"
-	"github.com/labstack/echo/v4"
-	"github.com/opengovern/opengovernance/services/wastage/api/wastage"
-	"github.com/opengovern/opengovernance/services/wastage/api/wastage/limit"
-	"github.com/opengovern/opengovernance/services/wastage/config"
-	"github.com/opengovern/opengovernance/services/wastage/cost"
-
"github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/ingestion" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - "go.uber.org/zap" -) - -type API struct { - cfg config.WastageConfig - blobClient *azblob.Client - blobWorkerPool *pond.WorkerPool - costSvc *cost.Service - recomSvc *recommendation.Service - ingestionSvc *ingestion.Service - limitsSvc *limit.Service - usageRepo repo.UsageV2Repo - usageV1Repo repo.UsageRepo - userRepo repo.UserRepo - orgRepo repo.OrganizationRepo - logger *zap.Logger -} - -func New(cfg config.WastageConfig, logger *zap.Logger, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, costSvc *cost.Service, recomSvc *recommendation.Service, ingestionSvc *ingestion.Service, limitsSvc *limit.Service, usageV1Repo repo.UsageRepo, usageRepo repo.UsageV2Repo, userRepo repo.UserRepo, orgRepo repo.OrganizationRepo) *API { - return &API{ - cfg: cfg, - blobClient: blobClient, - blobWorkerPool: blobWorkerPool, - costSvc: costSvc, - recomSvc: recomSvc, - limitsSvc: limitsSvc, - ingestionSvc: ingestionSvc, - usageV1Repo: usageV1Repo, - usageRepo: usageRepo, - userRepo: userRepo, - orgRepo: orgRepo, - logger: logger.Named("api"), - } -} - -func (api *API) Register(e *echo.Echo) { - qThr := wastage.New(api.cfg, api.blobClient, api.blobWorkerPool, api.costSvc, api.recomSvc, api.ingestionSvc, api.limitsSvc, api.usageV1Repo, api.usageRepo, api.userRepo, api.orgRepo, api.logger) - qThr.Register(e) -} diff --git a/services/wastage/api/entity/Organization.go b/services/wastage/api/entity/Organization.go deleted file mode 100644 index 6b9e74c16..000000000 --- a/services/wastage/api/entity/Organization.go +++ /dev/null @@ -1,19 +0,0 @@ -package entity - -import ( - "github.com/opengovern/opengovernance/services/wastage/db/model" - "time" -) - -type Organization struct { - OrganizationId string `json:"organization_id"` - PremiumUntil *time.Time `json:"premium_until"` -} - -// ToModel convert to model.Organization -func (o *Organization) ToModel() *model.Organization { - return &model.Organization{ - OrganizationId: o.OrganizationId, - PremiumUntil: o.PremiumUntil, - } -} diff --git a/services/wastage/api/entity/User.go b/services/wastage/api/entity/User.go deleted file mode 100644 index 52e4d7e8d..000000000 --- a/services/wastage/api/entity/User.go +++ /dev/null @@ -1,19 +0,0 @@ -package entity - -import ( - "github.com/opengovern/opengovernance/services/wastage/db/model" - "time" -) - -type User struct { - UserId string `json:"user_id"` - PremiumUntil *time.Time `json:"premium_until"` -} - -// ToModel convert to model.User -func (u *User) ToModel() *model.User { - return &model.User{ - UserId: u.UserId, - PremiumUntil: u.PremiumUntil, - } -} diff --git a/services/wastage/api/entity/aws_ec2_instance.go b/services/wastage/api/entity/aws_ec2_instance.go deleted file mode 100644 index d602832b5..000000000 --- a/services/wastage/api/entity/aws_ec2_instance.go +++ /dev/null @@ -1,113 +0,0 @@ -package entity - -import ( - types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" -) - -type EC2Placement struct { - Tenancy types.Tenancy `json:"tenancy"` - AvailabilityZone string `json:"availabilityZone"` - HashedHostId string `json:"hashedHostId"` -} - -type EC2Instance struct { - HashedInstanceId string `json:"hashedInstanceId"` - State types.InstanceStateName `json:"state"` - InstanceType types.InstanceType `json:"instanceType"` - Platform 
string `json:"platform"` - ThreadsPerCore int32 `json:"threadsPerCore"` - CoreCount int32 `json:"coreCount"` - EbsOptimized bool `json:"ebsOptimized"` - InstanceLifecycle types.InstanceLifecycleType `json:"instanceLifecycle"` - Monitoring *types.MonitoringState `json:"monitoring"` - Placement *EC2Placement `json:"placement"` - UsageOperation string `json:"usageOperation"` - Tenancy types.Tenancy `json:"tenancy"` -} - -type EC2Volume struct { - HashedVolumeId string `json:"hashedVolumeId"` - VolumeType types.VolumeType `json:"volumeType"` - Size *int32 `json:"size"` - Iops *int32 `json:"iops"` - AvailabilityZone *string `json:"availabilityZone"` - Throughput *float64 `json:"throughput"` -} - -type EC2InstanceWastageRequest struct { - RequestId *string `json:"requestId"` - CliVersion *string `json:"cliVersion"` - Identification map[string]string `json:"identification"` - Instance EC2Instance `json:"instance"` - Volumes []EC2Volume `json:"volumes"` - VolumeCount int `json:"volumeCount"` - Metrics map[string][]types2.Datapoint `json:"metrics"` - VolumeMetrics map[string]map[string][]types2.Datapoint `json:"volumeMetrics"` - Region string `json:"region"` - Preferences map[string]*string `json:"preferences"` - Loading bool `json:"loading"` -} - -type RightsizingEC2Instance struct { - InstanceType string `json:"instanceType"` - Region string `json:"region"` - Cost float64 `json:"cost"` - CostComponents map[string]float64 `json:"costComponents"` - Processor string `json:"processor"` - Architecture string `json:"architecture"` - VCPU int64 `json:"vCPU"` - Memory float64 `json:"memory"` - EBSBandwidth string `json:"ebsBandwidth"` - EBSIops string `json:"ebsIops"` - NetworkThroughput string `json:"networkThroughput"` - ENASupported string `json:"enaSupported"` - LicensePrice float64 `json:"licensePrice"` - License string `json:"license"` -} - -type Usage struct { - Avg *float64 - Min *float64 - Max *float64 - Last *types2.Datapoint -} - -type RightSizingRecommendation struct { - Current RightsizingEC2Instance `json:"current"` - Recommended *RightsizingEC2Instance `json:"recommended"` - - VCPU Usage `json:"vCPU"` - Memory Usage `json:"memory"` - EBSBandwidth Usage `json:"ebsBandwidth"` - EBSIops Usage `json:"ebsIops"` - NetworkThroughput Usage `json:"networkThroughput"` - - Description string `json:"description"` -} - -type RightsizingEBSVolume struct { - Tier types.VolumeType `json:"tier"` - VolumeSize *int32 `json:"volumeSize"` - BaselineIOPS int32 `json:"baselineIOPS"` - ProvisionedIOPS *int32 `json:"provisionedIOPS"` - BaselineThroughput float64 `json:"baselineThroughput"` - ProvisionedThroughput *float64 `json:"provisionedThroughput"` - Cost float64 `json:"cost"` - CostComponents map[string]float64 `json:"costComponents"` -} - -type EBSVolumeRecommendation struct { - Current RightsizingEBSVolume - Recommended *RightsizingEBSVolume - - IOPS Usage `json:"iops"` - Throughput Usage `json:"throughput"` - - Description string `json:"description"` -} - -type EC2InstanceWastageResponse struct { - RightSizing RightSizingRecommendation `json:"rightSizing"` - VolumeRightSizing map[string]EBSVolumeRecommendation `json:"volumes"` -} diff --git a/services/wastage/api/entity/aws_rds.go b/services/wastage/api/entity/aws_rds.go deleted file mode 100644 index 195dd93e2..000000000 --- a/services/wastage/api/entity/aws_rds.go +++ /dev/null @@ -1,104 +0,0 @@ -package entity - -import types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - -type AwsRdsClusterType string - -const ( - 
AwsRdsClusterTypeSingleInstance AwsRdsClusterType = "Single-AZ" - AwsRdsClusterTypeMultiAzOneInstance AwsRdsClusterType = "Multi-AZ" - AwsRdsClusterTypeMultiAzTwoInstance AwsRdsClusterType = "Multi-AZ (readable standbys)" -) - -type AwsRds struct { - HashedInstanceId string `json:"hashedInstanceId"` - AvailabilityZone string `json:"availabilityZone"` - InstanceType string `json:"instanceType"` - Engine string `json:"engine"` - EngineVersion string `json:"engineVersion"` - LicenseModel string `json:"licenseModel"` - BackupRetentionPeriod *int32 `json:"backupRetentionPeriod"` - ClusterType AwsRdsClusterType `json:"clusterType"` - PerformanceInsightsEnabled bool `json:"performanceInsightsEnabled"` - PerformanceInsightsRetentionPeriod *int32 `json:"performanceInsightsRetentionPeriod"` - - StorageType *string `json:"storageType"` - StorageSize *int32 `json:"storageSize"` - StorageIops *int32 `json:"storageIops"` - StorageThroughput *float64 `json:"storageThroughput"` -} - -type RightsizingAwsRds struct { - Region string `json:"region"` - InstanceType string `json:"instanceType"` - Engine string `json:"engine"` - EngineVersion string `json:"engineVersion"` - ClusterType AwsRdsClusterType `json:"clusterType"` - Processor string `json:"processor"` - Architecture string `json:"architecture"` - VCPU int64 `json:"vCPU"` - MemoryGb int64 `json:"memoryGb"` - - StorageType *string `json:"storageType"` - StorageSize *int32 `json:"storageSize"` - StorageIops *int32 `json:"storageIops"` - StorageThroughput *float64 `json:"storageThroughput"` - - Cost float64 `json:"cost"` - CostComponents map[string]float64 `json:"costComponents"` - ComputeCost float64 `json:"computeCost"` - ComputeCostComponents map[string]float64 `json:"computeCostComponents"` - StorageCost float64 `json:"storageCost"` - StorageCostComponents map[string]float64 `json:"storageCostComponents"` -} - -type AwsRdsRightsizingRecommendation struct { - Current RightsizingAwsRds `json:"current"` - Recommended *RightsizingAwsRds `json:"recommended"` - - VCPU Usage `json:"vCPU"` - FreeMemoryBytes Usage `json:"freeMemoryBytes"` - FreeStorageBytes Usage `json:"freeStorageBytes"` - NetworkThroughputBytes Usage `json:"networkThroughputBytes"` - StorageIops Usage `json:"storageIops"` - StorageThroughput Usage `json:"storageThroughputBytes"` - VolumeBytesUsed Usage `json:"volumeBytesUsed"` - - Description string `json:"description"` -} - -type AwsRdsWastageRequest struct { - RequestId *string `json:"requestId"` - CliVersion *string `json:"cliVersion"` - Identification map[string]string `json:"identification"` - Instance AwsRds `json:"instance"` - Metrics map[string][]types2.Datapoint `json:"metrics"` - Region string `json:"region"` - Preferences map[string]*string `json:"preferences"` - Loading bool `json:"loading"` -} - -type AwsRdsWastageResponse struct { - RightSizing AwsRdsRightsizingRecommendation `json:"rightSizing"` -} - -type AwsRdsCluster struct { - HashedClusterId string `json:"hashedClusterId"` - Engine string `json:"engine"` -} - -type AwsClusterWastageRequest struct { - RequestId *string `json:"requestId"` - CliVersion *string `json:"cliVersion"` - Identification map[string]string `json:"identification"` - Cluster AwsRdsCluster `json:"cluster"` - Instances []AwsRds `json:"instances"` - Metrics map[string]map[string][]types2.Datapoint `json:"metrics"` - Region string `json:"region"` - Preferences map[string]*string `json:"preferences"` - Loading bool `json:"loading"` -} - -type AwsClusterWastageResponse struct { - RightSizing 
map[string]AwsRdsRightsizingRecommendation `json:"rightSizing"` -} diff --git a/services/wastage/api/entity/azure_vm.go b/services/wastage/api/entity/azure_vm.go deleted file mode 100644 index b873d69bb..000000000 --- a/services/wastage/api/entity/azure_vm.go +++ /dev/null @@ -1,8 +0,0 @@ -package entity - -type AzureVM struct { - Id string `json:"id"` - Zone string `json:"zone"` - Region string `json:"region"` - InstanceType string `json:"instance_type"` -} diff --git a/services/wastage/api/entity/configuration.go b/services/wastage/api/entity/configuration.go deleted file mode 100644 index b39166c03..000000000 --- a/services/wastage/api/entity/configuration.go +++ /dev/null @@ -1,7 +0,0 @@ -package entity - -type Configuration struct { - EC2LazyLoad int `json:"ec2LazyLoad"` - RDSLazyLoad int `json:"rdsLazyLoad"` - KubernetesLazyLoad int `json:"kubernetesLazyLoad"` -} diff --git a/services/wastage/api/entity/gcp_compute.go b/services/wastage/api/entity/gcp_compute.go deleted file mode 100644 index 519a47ab1..000000000 --- a/services/wastage/api/entity/gcp_compute.go +++ /dev/null @@ -1,88 +0,0 @@ -package entity - -import "time" - -type GcpComputeInstance struct { - HashedInstanceId string `json:"hashedInstanceId"` - Zone string `json:"zone"` - MachineType string `json:"machineType"` -} - -type GcpComputeDisk struct { - HashedDiskId string `json:"hashedDiskId"` - Zone string `json:"zone"` - Region string `json:"region"` - DiskType string `json:"diskType"` - DiskSize *int64 `json:"diskSize"` - ProvisionedIops *int64 `json:"provisionedIops"` -} - -type RightsizingGcpComputeInstance struct { - Zone string `json:"zone"` - Region string `json:"region"` - MachineType string `json:"machineType"` - MachineFamily string `json:"machineFamily"` - CPU int64 `json:"cpu"` - MemoryMb int64 `json:"memoryMb"` - - Cost float64 `json:"cost"` -} - -type GcpComputeInstanceRightsizingRecommendation struct { - Current RightsizingGcpComputeInstance `json:"current"` - Recommended *RightsizingGcpComputeInstance `json:"recommended"` - - CPU Usage `json:"cpu"` - Memory Usage `json:"memory"` - - Description string `json:"description"` -} - -type GcpComputeInstanceWastageRequest struct { - RequestId *string `json:"requestId"` - CliVersion *string `json:"cliVersion"` - Identification map[string]string `json:"identification"` - Instance GcpComputeInstance `json:"instance"` - Disks []GcpComputeDisk `json:"disks"` - Metrics map[string][]Datapoint `json:"metrics"` - DisksMetrics map[string]map[string][]Datapoint `json:"diskMetrics"` - Region string `json:"region"` - Preferences map[string]*string `json:"preferences"` - Loading bool `json:"loading"` -} - -type RightsizingGcpComputeDisk struct { - Zone string `json:"zone"` - Region string `json:"region"` - DiskType string `json:"diskType"` - DiskSize int64 `json:"diskSize"` - ReadIopsLimit int64 `json:"readIopsLimit"` - WriteIopsLimit int64 `json:"writeIopsLimit"` - ReadThroughputLimit float64 `json:"readThroughputLimit"` - WriteThroughputLimit float64 `json:"writeThroughputLimit"` - - Cost float64 `json:"cost"` -} - -type GcpComputeDiskRecommendation struct { - Current RightsizingGcpComputeDisk - Recommended *RightsizingGcpComputeDisk - - ReadIops Usage `json:"readIops"` - WriteIops Usage `json:"writeIops"` - ReadThroughput Usage `json:"readThroughput"` - WriteThroughput Usage `json:"writeThroughput"` - - Description string `json:"description"` -} - -type GcpComputeInstanceWastageResponse struct { - RightSizing GcpComputeInstanceRightsizingRecommendation 
`json:"rightSizing"` - VolumeRightSizing map[string]GcpComputeDiskRecommendation `json:"volumes"` -} - -type Datapoint struct { - StartTime time.Time - EndTime time.Time - Value float64 -} diff --git a/services/wastage/api/entity/kubernetes_pod.go b/services/wastage/api/entity/kubernetes_pod.go deleted file mode 100644 index 687ea4095..000000000 --- a/services/wastage/api/entity/kubernetes_pod.go +++ /dev/null @@ -1,55 +0,0 @@ -package entity - -import ( - corev1 "k8s.io/api/core/v1" -) - -type RightsizingKubernetesContainer struct { - Name string `json:"name"` - - MemoryRequest float64 `json:"memoryRequest"` - MemoryLimit float64 `json:"memoryLimit"` - - CPURequest float64 `json:"cpuRequest"` - CPULimit float64 `json:"cpuLimit"` -} - -type KubernetesContainerRightsizingRecommendation struct { - Name string `json:"name"` - - Current RightsizingKubernetesContainer `json:"current"` - Recommended *RightsizingKubernetesContainer `json:"recommended"` - - MemoryTrimmedMean *float64 `json:"memoryTrimmedMean"` - MemoryMax *float64 `json:"memoryMax"` - CPUTrimmedMean *float64 `json:"cpuTrimmedMean"` - CPUMax *float64 `json:"cpuMax"` - - Description string `json:"description"` -} - -type KubernetesPodRightsizingRecommendation struct { - Name string `json:"name"` - - ContainersRightsizing []KubernetesContainerRightsizingRecommendation `json:"containersRightsizing"` -} - -type KubernetesContainerMetrics struct { - CPU map[string]float64 `json:"cpu"` - Memory map[string]float64 `json:"memory"` -} - -type KubernetesPodWastageRequest struct { - RequestId *string `json:"requestId"` - CliVersion *string `json:"cliVersion"` - Identification map[string]string `json:"identification"` - Pod corev1.Pod `json:"pod"` - Namespace string `json:"namespace"` - Preferences map[string]*string `json:"preferences"` - Metrics map[string]KubernetesContainerMetrics `json:"metrics"` // container name -> metrics - Loading bool `json:"loading"` -} - -type KubernetesPodWastageResponse struct { - RightSizing KubernetesPodRightsizingRecommendation `json:"rightSizing"` -} diff --git a/services/wastage/api/wastage/api.go b/services/wastage/api/wastage/api.go deleted file mode 100644 index 70ddc9295..000000000 --- a/services/wastage/api/wastage/api.go +++ /dev/null @@ -1,1064 +0,0 @@ -package wastage - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/alitto/pond" - "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - types2 "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/google/uuid" - "github.com/labstack/echo/v4" - "github.com/opengovern/og-util/pkg/api" - "github.com/opengovern/og-util/pkg/httpclient" - "github.com/opengovern/og-util/pkg/httpserver" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/opengovernance/services/wastage/api/wastage/limit" - "github.com/opengovern/opengovernance/services/wastage/config" - "github.com/opengovern/opengovernance/services/wastage/cost" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/ingestion" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - 
"golang.org/x/mod/semver" - "golang.org/x/net/context" - "math" - "net/http" - "strconv" - "strings" - "time" -) - -type API struct { - cfg config.WastageConfig - tracer trace.Tracer - logger *zap.Logger - blobClient *azblob.Client - blobWorkerPool *pond.WorkerPool - costSvc *cost.Service - usageRepo repo.UsageV2Repo - usageV1Repo repo.UsageRepo - userRepo repo.UserRepo - orgRepo repo.OrganizationRepo - recomSvc *recommendation.Service - ingestionSvc *ingestion.Service - limitsSvc *limit.Service -} - -func New(cfg config.WastageConfig, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, costSvc *cost.Service, recomSvc *recommendation.Service, ingestionService *ingestion.Service, limitsSvc *limit.Service, usageV1Repo repo.UsageRepo, usageRepo repo.UsageV2Repo, userRepo repo.UserRepo, orgRepo repo.OrganizationRepo, logger *zap.Logger) API { - return API{ - cfg: cfg, - blobClient: blobClient, - blobWorkerPool: blobWorkerPool, - costSvc: costSvc, - recomSvc: recomSvc, - usageRepo: usageRepo, - usageV1Repo: usageV1Repo, - userRepo: userRepo, - orgRepo: orgRepo, - ingestionSvc: ingestionService, - limitsSvc: limitsSvc, - tracer: otel.GetTracerProvider().Tracer("wastage.http.sources"), - logger: logger.Named("wastage-api"), - } -} - -func (s API) Register(e *echo.Echo) { - g := e.Group("/api/v1/wastage") - g.POST("/configuration", s.Configuration) - g.POST("/ec2-instance", httpserver.AuthorizeHandler(s.EC2Instance, api.ViewerRole)) - g.POST("/aws-rds", httpserver.AuthorizeHandler(s.AwsRDS, api.ViewerRole)) - g.POST("/aws-rds-cluster", httpserver.AuthorizeHandler(s.AwsRDSCluster, api.ViewerRole)) - i := e.Group("/api/v1/wastage-ingestion") - i.PUT("/ingest/:service", httpserver.AuthorizeHandler(s.TriggerIngest, api.AdminRole)) - i.GET("/usages/:id", httpserver.AuthorizeHandler(s.GetUsage, api.AdminRole)) - i.GET("/usages/accountID", httpserver.AuthorizeHandler(s.GetUsageIDByAccountID, api.AdminRole)) - i.GET("/usages/accountID/last", httpserver.AuthorizeHandler(s.GetLastUsageIDByAccountID, api.AdminRole)) - i.PUT("/usages/migrate", s.MigrateUsages) - i.PUT("/usages/migrate/v2", s.MigrateUsagesV2) - i.PUT("/usages/fill-rds-costs", s.FillRdsCosts) - i.POST("/user", httpserver.AuthorizeHandler(s.CreateUser, api.AdminRole)) - i.PUT("/user/:userId", httpserver.AuthorizeHandler(s.UpdateUser, api.AdminRole)) - i.POST("/organization", httpserver.AuthorizeHandler(s.CreateOrganization, api.AdminRole)) - i.PUT("/organization/:organizationId", httpserver.AuthorizeHandler(s.UpdateOrganization, api.AdminRole)) -} - -func (s API) Configuration(c echo.Context) error { - return c.JSON(http.StatusOK, entity.Configuration{ - EC2LazyLoad: 20, - RDSLazyLoad: 20, - KubernetesLazyLoad: 10000, - }) -} - -// EC2Instance godoc -// -// @Summary List wastage in EC2 Instances -// @Description List wastage in EC2 Instances -// @Security BearerToken -// @Tags wastage -// @Produce json -// @Param request body entity.EC2InstanceWastageRequest true "Request" -// @Success 200 {object} entity.EC2InstanceWastageResponse -// @Router /wastage/api/v1/wastage/ec2-instance [post] -func (s API) EC2Instance(echoCtx echo.Context) error { - ctx := echoCtx.Request().Context() - start := time.Now() - ctx = otel.GetTextMapPropagator().Extract(ctx, propagation.HeaderCarrier(echoCtx.Request().Header)) - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var req entity.EC2InstanceWastageRequest - if err := echoCtx.Bind(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - if err := 
echoCtx.Validate(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - - var resp entity.EC2InstanceWastageResponse - var err error - - stats := model.Statistics{ - AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Instance.HashedInstanceId, - Auth0UserId: httpserver.GetUserID(echoCtx), - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - volMetrics := req.VolumeMetrics - req.Metrics = nil - req.VolumeMetrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - req.VolumeMetrics = volMetrics - - if req.RequestId == nil { - id := uuid.New().String() - req.RequestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("ec2-instance/%s.json", *req.RequestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "ec2-instance", - Request: trimmedReqJson, - RequestId: req.RequestId, - CliVersion: req.CliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - return err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := entity.RightsizingEC2Instance{} - if resp.RightSizing.Recommended != nil { - recom = *resp.RightSizing.Recommended - } - - instanceCost := resp.RightSizing.Current.Cost - recomInstanceCost := recom.Cost - - volumeCurrentCost := 0.0 - volumeRecomCost := 0.0 - for _, v := range resp.VolumeRightSizing { - volumeCurrentCost += v.Current.Cost - if v.Recommended != nil { - volumeRecomCost += v.Recommended.Cost - } - } - - stats.CurrentCost = instanceCost + volumeCurrentCost - stats.RecommendedCost = recomInstanceCost + volumeRecomCost - stats.Savings = (instanceCost + volumeCurrentCost) - (recomInstanceCost + volumeRecomCost) - stats.EC2InstanceCurrentCost = instanceCost - stats.EC2InstanceRecommendedCost = recomInstanceCost - stats.EC2InstanceSavings = instanceCost - recomInstanceCost - stats.EBSCurrentCost = volumeCurrentCost - stats.EBSRecommendedCost = volumeRecomCost - stats.EBSSavings = volumeCurrentCost - volumeRecomCost - stats.EBSVolumeCount = len(resp.VolumeRightSizing) - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - - if req.Instance.State != types2.InstanceStateNameRunning { - err = echo.NewHTTPError(http.StatusBadRequest, "instance is not running") - return err - } - - if req.Loading { - return echoCtx.JSON(http.StatusOK, entity.EC2InstanceWastageResponse{}) - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+*req.CliVersion, "v0.5.2") < 0 { - return echo.NewHTTPError(http.StatusBadRequest, "plugin version is no longer supported - please update to the latest version") - } - - userId := httpserver.GetUserID(echoCtx) - ok, err := s.limitsSvc.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - if 
err != nil { - s.logger.Error("failed to check profile limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - if err != nil { - return err - } - } - - ok, err = s.limitsSvc.CheckEC2InstanceLimit(ctx, userId, req.Identification["org_m_email"]) - if err != nil { - s.logger.Error("failed to check aws ec2 instance limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "ec2 instance") - if err != nil { - return err - } - } - - ec2RightSizingRecom, err := s.recomSvc.EC2InstanceRecommendation(ctx, req.Region, req.Instance, req.Volumes, req.Metrics, req.VolumeMetrics, req.Preferences, usageAverageType) - if err != nil { - err = fmt.Errorf("failed to get ec2 instance recommendation: %s", err.Error()) - return err - } - - ebsRightSizingRecoms := make(map[string]entity.EBSVolumeRecommendation) - for _, vol := range req.Volumes { - //ok, err := checkEBSVolumeLimit(s.usageRepo, httpserver.GetUserID(c), req.Identification["org_m_email"]) - //if err != nil { - // s.logger.Error("failed to check aws ebs volume limit", zap.Error(err)) - // return err - //} - //if !ok { - // err = s.checkPremiumAndSendErr(c, req.Identification["org_m_email"], "ebs volume") - // if err != nil { - // return err - // } - //} - var ebsRightSizingRecom *entity.EBSVolumeRecommendation - ebsRightSizingRecom, err = s.recomSvc.EBSVolumeRecommendation(ctx, req.Region, vol, req.VolumeMetrics[vol.HashedVolumeId], req.Preferences, usageAverageType) - if err != nil { - err = fmt.Errorf("failed to get ebs volume %s recommendation: %s", vol.HashedVolumeId, err.Error()) - return err - } - ebsRightSizingRecoms[vol.HashedVolumeId] = *ebsRightSizingRecom - } - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - // DO NOT change this, resp is used in updating usage - resp = entity.EC2InstanceWastageResponse{ - RightSizing: *ec2RightSizingRecom, - VolumeRightSizing: ebsRightSizingRecoms, - } - // DO NOT change this, resp is used in updating usage - return echoCtx.JSON(http.StatusOK, resp) -} - -// AwsRDS godoc -// -// @Summary List wastage in AWS RDS -// @Description List wastage in AWS RDS -// @Security BearerToken -// @Tags wastage -// @Produce json -// @Param request body entity.AwsRdsWastageRequest true "Request" -// @Success 200 {object} entity.AwsRdsWastageResponse -// @Router /wastage/api/v1/wastage/aws-rds [post] -func (s API) AwsRDS(echoCtx echo.Context) error { - ctx := echoCtx.Request().Context() - start := time.Now() - ctx = otel.GetTextMapPropagator().Extract(ctx, propagation.HeaderCarrier(echoCtx.Request().Header)) - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var req entity.AwsRdsWastageRequest - if err := echoCtx.Bind(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - if err := echoCtx.Validate(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - - var resp entity.AwsRdsWastageResponse - var err error - - stats := model.Statistics{ - AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Instance.HashedInstanceId, - Auth0UserId: httpserver.GetUserID(echoCtx), - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := 
json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - - if req.RequestId == nil { - id := uuid.New().String() - req.RequestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("aws-rds/%s.json", *req.RequestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - usage := model.UsageV2{ - ApiEndpoint: "aws-rds", - Request: trimmedReqJson, - RequestId: req.RequestId, - CliVersion: req.CliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := entity.RightsizingAwsRds{} - if resp.RightSizing.Recommended != nil { - recom = *resp.RightSizing.Recommended - } - stats.CurrentCost = resp.RightSizing.Current.Cost - stats.RecommendedCost = recom.Cost - stats.Savings = resp.RightSizing.Current.Cost - recom.Cost - stats.RDSInstanceCurrentCost = resp.RightSizing.Current.Cost - stats.RDSInstanceRecommendedCost = recom.Cost - stats.RDSInstanceSavings = resp.RightSizing.Current.Cost - recom.Cost - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return echoCtx.JSON(http.StatusOK, entity.AwsRdsWastageResponse{}) - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+*req.CliVersion, "v0.5.2") < 0 { - return echo.NewHTTPError(http.StatusBadRequest, "plugin version is no longer supported - please update to the latest version") - } - - userId := httpserver.GetUserID(echoCtx) - - ok, err := s.limitsSvc.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - if err != nil { - s.logger.Error("failed to check profile limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - if err != nil { - return err - } - } - - ok, err = s.limitsSvc.CheckRDSInstanceLimit(ctx, userId, req.Identification["org_m_email"]) - if err != nil { - s.logger.Error("failed to check aws rds instance limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "rds instance") - if err != nil { - return err - } - } - - rdsRightSizingRecom, err := s.recomSvc.AwsRdsRecommendation(ctx, req.Region, req.Instance, req.Metrics, req.Preferences, usageAverageType) - if err != nil { - s.logger.Error("failed to get aws rds recommendation", zap.Error(err)) - return err - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - // DO NOT change this, resp is used in updating usage - resp = entity.AwsRdsWastageResponse{ - RightSizing: 
*rdsRightSizingRecom, - } - // DO NOT change this, resp is used in updating usage - return echoCtx.JSON(http.StatusOK, resp) -} - -// AwsRDSCluster godoc -// -// @Summary List wastage in AWS RDS Cluster -// @Description List wastage in AWS RDS Cluster -// @Security BearerToken -// @Tags wastage -// @Produce json -// @Param request body entity.AwsClusterWastageRequest true "Request" -// @Success 200 {object} entity.AwsClusterWastageResponse -// @Router /wastage/api/v1/wastage/aws-rds-cluster [post] -func (s API) AwsRDSCluster(echoCtx echo.Context) error { - ctx := echoCtx.Request().Context() - start := time.Now() - ctx = otel.GetTextMapPropagator().Extract(ctx, propagation.HeaderCarrier(echoCtx.Request().Header)) - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var req entity.AwsClusterWastageRequest - if err := echoCtx.Bind(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - if err := echoCtx.Validate(&req); err != nil { - return echo.NewHTTPError(http.StatusBadRequest, err.Error()) - } - - var resp entity.AwsClusterWastageResponse - var err error - - stats := model.Statistics{ - AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Cluster.HashedClusterId, - Auth0UserId: httpserver.GetUserID(echoCtx), - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - - if req.RequestId == nil { - id := uuid.New().String() - req.RequestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("aws-rds-cluster/%s.json", *req.RequestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - usage := model.UsageV2{ - ApiEndpoint: "aws-rds-cluster", - Request: trimmedReqJson, - RequestId: req.RequestId, - CliVersion: req.CliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := entity.RightsizingAwsRds{} - for _, instance := range resp.RightSizing { - recom.Region = instance.Recommended.Region - recom.InstanceType = instance.Recommended.InstanceType - recom.Engine = instance.Recommended.Engine - recom.EngineVersion = instance.Recommended.EngineVersion - recom.ClusterType = instance.Recommended.ClusterType - recom.VCPU += instance.Recommended.VCPU - recom.MemoryGb += instance.Recommended.MemoryGb - recom.StorageType = instance.Recommended.StorageType - recom.StorageSize = instance.Recommended.StorageSize - recom.StorageIops = instance.Recommended.StorageIops - recom.StorageThroughput = instance.Recommended.StorageThroughput - - recom.Cost += instance.Recommended.Cost - recom.ComputeCost += instance.Recommended.ComputeCost - recom.StorageCost += instance.Recommended.StorageCost - - stats.CurrentCost += instance.Current.Cost - stats.RDSInstanceCurrentCost += instance.Current.Cost - } - stats.Savings = stats.CurrentCost - recom.Cost - stats.RDSInstanceSavings = 
stats.CurrentCost - recom.Cost - stats.RecommendedCost = recom.Cost - stats.RDSInstanceRecommendedCost = recom.Cost - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return echoCtx.JSON(http.StatusOK, entity.AwsClusterWastageResponse{}) - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+*req.CliVersion, "v0.5.2") < 0 { - return echo.NewHTTPError(http.StatusBadRequest, "plugin version is no longer supported - please update to the latest version") - } - - resp = entity.AwsClusterWastageResponse{ - RightSizing: make(map[string]entity.AwsRdsRightsizingRecommendation), - } - - userId := httpserver.GetUserID(echoCtx) - - ok, err := s.limitsSvc.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - if err != nil { - s.logger.Error("failed to check profile limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - if err != nil { - return err - } - } - - ok, err = s.limitsSvc.CheckRDSClusterLimit(ctx, userId, req.Identification["org_m_email"]) - if err != nil { - s.logger.Error("failed to check aws rds cluster limit", zap.Error(err)) - return err - } - if !ok { - err = s.limitsSvc.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "rds cluster") - if err != nil { - return err - } - } - - var aggregatedInstance *entity.AwsRds - var aggregatedMetrics map[string][]types.Datapoint - for _, instance := range req.Instances { - instance := instance - rdsRightSizingRecom, err2 := s.recomSvc.AwsRdsRecommendation(ctx, req.Region, instance, req.Metrics[instance.HashedInstanceId], req.Preferences, usageAverageType) - if err2 != nil { - s.logger.Error("failed to get aws rds recommendation", zap.Error(err)) - err = err2 - return err - } - resp.RightSizing[instance.HashedInstanceId] = *rdsRightSizingRecom - if aggregatedInstance == nil { - aggregatedInstance = &instance - } - if aggregatedMetrics == nil { - aggregatedMetrics = req.Metrics[instance.HashedInstanceId] - } else { - for key, value := range req.Metrics[instance.HashedInstanceId] { - switch key { - case "FreeableMemory", "FreeStorageSpace": - aggregatedMetrics[key] = recommendation.MergeDatapoints(aggregatedMetrics[key], value, func(aa, bb float64) float64 { return math.Min(aa, bb) }) - default: - aggregatedMetrics[key] = recommendation.MergeDatapoints(aggregatedMetrics[key], value, func(aa, bb float64) float64 { return math.Max(aa, bb) }) - } - } - } - } - if aggregatedInstance == nil { - return echoCtx.JSON(http.StatusBadRequest, "no instances found in the request") - } - rdsClusterRightSizingRecom, err := s.recomSvc.AwsRdsRecommendation(ctx, req.Region, *aggregatedInstance, aggregatedMetrics, req.Preferences, usageAverageType) - if err != nil { - s.logger.Error("failed to get aws rds recommendation", zap.Error(err)) - return err - } - - if !strings.Contains(strings.ToLower(req.Cluster.Engine), "aurora") { - for k, instance := range resp.RightSizing { - instance := instance - instance.Recommended = rdsClusterRightSizingRecom.Recommended - instance.Description = rdsClusterRightSizingRecom.Description - resp.RightSizing[k] = instance - } - } else { - // TODO Handle aurora storage somehow - } - - elapsed := time.Since(start).Seconds() - 
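Note: the removed AwsRDSCluster handler builds one synthetic instance out of all cluster members before asking for a cluster-level recommendation. Headroom series (FreeableMemory, FreeStorageSpace) are merged datapoint-by-datapoint with min, every other series with max, so the aggregate reflects the busiest member. A minimal sketch of that merge, assuming MergeDatapoints pairs points by timestamp; Datapoint below is a simplified stand-in for the AWS SDK's types.Datapoint and mergeDatapoints is illustrative, not the project's implementation.

// Sketch of the timestamp-wise merge the cluster handler relies on
// (recommendation.MergeDatapoints). Types and helper are illustrative.
package main

import (
	"fmt"
	"math"
	"time"
)

type Datapoint struct {
	Timestamp time.Time
	Value     float64
}

// mergeDatapoints pairs points by timestamp and combines matched values
// with f; unmatched points are kept as-is.
func mergeDatapoints(a, b []Datapoint, f func(x, y float64) float64) []Datapoint {
	merged := map[time.Time]float64{}
	for _, dp := range a {
		merged[dp.Timestamp] = dp.Value
	}
	for _, dp := range b {
		if v, ok := merged[dp.Timestamp]; ok {
			merged[dp.Timestamp] = f(v, dp.Value)
		} else {
			merged[dp.Timestamp] = dp.Value
		}
	}
	out := make([]Datapoint, 0, len(merged))
	for ts, v := range merged {
		out = append(out, Datapoint{Timestamp: ts, Value: v})
	}
	return out
}

func main() {
	t0 := time.Unix(0, 0)
	cpuA := []Datapoint{{t0, 40}, {t0.Add(time.Minute), 70}}
	cpuB := []Datapoint{{t0, 55}}
	// Load-style metrics merge with max; headroom-style metrics
	// (FreeableMemory, FreeStorageSpace) merge with min.
	fmt.Println(mergeDatapoints(cpuA, cpuB, math.Max))
}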
usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - return echoCtx.JSON(http.StatusOK, resp) -} - -func (s API) TriggerIngest(echoCtx echo.Context) error { - ctx := echoCtx.Request().Context() - ctx = otel.GetTextMapPropagator().Extract(ctx, propagation.HeaderCarrier(echoCtx.Request().Header)) - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - service := echoCtx.Param("service") - - s.logger.Info(fmt.Sprintf("Ingester is going to be triggered for %s", service)) - - switch service { - case "aws-ec2-instance": - err := s.ingestionSvc.DataAgeRepo.Delete("AWS::EC2::Instance") - if err != nil { - s.logger.Error("failed to delete data age", zap.Error(err), zap.String("service", service)) - return err - } - s.logger.Info("deleted data age for AWS::EC2::Instance ingestion will be triggered soon") - case "aws-rds": - err := s.ingestionSvc.DataAgeRepo.Delete("AWS::RDS::Instance") - if err != nil { - s.logger.Error("failed to delete data age", zap.Error(err), zap.String("service", service)) - return err - } - s.logger.Info("deleted data age for AWS::RDS::Instance ingestion will be triggered soon") - case "gcp-compute-instance": - err := s.ingestionSvc.DataAgeRepo.Delete("GCPComputeEngine") - if err != nil { - s.logger.Error("failed to delete data age", zap.Error(err), zap.String("service", service)) - return err - } - s.logger.Info("deleted data age for GCPComputeEngine ingestion will be triggered soon") - default: - s.logger.Error(fmt.Sprintf("Service %s not supported", service)) - } - - s.logger.Info(fmt.Sprintf("Ingester triggered for %s", service)) - - return echoCtx.NoContent(http.StatusOK) -} - -func (s API) MigrateUsages(echoCtx echo.Context) error { - go func() { - ctx := context.Background() - s.logger.Info("Usage table migration started") - - for { - usage, err := s.usageV1Repo.GetRandomNotMoved() - if err != nil { - s.logger.Error("error while getting usage_v1 usages list", zap.Error(err)) - break - } - if usage == nil { - break - } - if usage.Endpoint == "aws-rds" { - var requestBody entity.AwsRdsWastageRequest - err = json.Unmarshal(usage.Request, &requestBody) - if err != nil { - s.logger.Error("failed to unmarshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - requestId := fmt.Sprintf("usage_v1_%v", usage.ID) - cliVersion := "unknown" - requestBody.RequestId = &requestId - requestBody.CliVersion = &cliVersion - - url := "https://api.kaytu.io/kaytu/wastage/api/v1/wastage/aws-rds" - - payload, err := json.Marshal(requestBody) - if err != nil { - s.logger.Error("failed to marshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - - if _, err := httpclient.DoRequest(ctx, http.MethodPost, url, httpclient.FromEchoContext(echoCtx).ToHeaders(), payload, nil); err != nil { - s.logger.Error("failed to rerun request", zap.Any("usage_id", usage.ID), zap.Error(err)) - } - - usage.Moved = true - err = s.usageV1Repo.Update(usage.ID, *usage) - if err != nil { - s.logger.Error("failed to update usage moved flag", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - } else { - var requestBody entity.EC2InstanceWastageRequest - err = json.Unmarshal(usage.Request, &requestBody) - if err != nil { - s.logger.Error("failed to unmarshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - requestId := fmt.Sprintf("usage_v1_%v", usage.ID) - cliVersion := "unknown" - 
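Note: the removed MigrateUsages handler replays each usage_v1 row against the live wastage API: the stored request body is re-tagged with a synthetic RequestId of the form usage_v1_<id> and a CliVersion of "unknown", POSTed to the public endpoint, and the row is then flagged Moved so it is not picked up again. A condensed sketch of that replay step, assuming a plain net/http client in place of the project's httpclient.DoRequest wrapper; replayUsage, its map-based body mutation, and the JSON field names are illustrative.

// Condensed sketch of the replay step in the removed MigrateUsages handler.
// replayUsage is hypothetical; the real code sends the payload through
// httpclient.DoRequest with headers copied from the echo context.
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

func replayUsage(ctx context.Context, endpoint string, usageID uint, stored []byte) error {
	var body map[string]any
	if err := json.Unmarshal(stored, &body); err != nil {
		return fmt.Errorf("decode usage %d: %w", usageID, err)
	}
	// Tag the replay so re-recorded v2 rows can be traced to their v1 origin.
	body["requestId"] = fmt.Sprintf("usage_v1_%v", usageID)
	body["cliVersion"] = "unknown"

	payload, err := json.Marshal(body)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		"https://api.kaytu.io/kaytu/wastage/api/v1/wastage/"+endpoint,
		bytes.NewReader(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

func main() {
	stored := []byte(`{"identification":{"account":"123"}}`)
	if err := replayUsage(context.Background(), "aws-rds", 42, stored); err != nil {
		fmt.Println("replay failed:", err)
	}
}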
requestBody.RequestId = &requestId - requestBody.CliVersion = &cliVersion - - url := "https://api.kaytu.io/kaytu/wastage/api/v1/wastage/ec2-instance" - - payload, err := json.Marshal(requestBody) - if err != nil { - s.logger.Error("failed to marshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - - if _, err := httpclient.DoRequest(ctx, http.MethodPost, url, httpclient.FromEchoContext(echoCtx).ToHeaders(), payload, nil); err != nil { - s.logger.Error("failed to rerun request", zap.Any("usage_id", usage.ID), zap.Error(err)) - } - - usage.Moved = true - err = s.usageV1Repo.Update(usage.ID, *usage) - if err != nil { - s.logger.Error("failed to update usage moved flag", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - } - } - - }() - - return echoCtx.NoContent(http.StatusOK) -} - -func (s API) MigrateUsagesV2(c echo.Context) error { - go func() { - //ctx := context.Background() - s.logger.Info("Usage table migration started") - - for { - usage, err := s.usageRepo.GetRandomNullStatistics() - if err != nil { - s.logger.Error("error while getting null statistic usages list", zap.Error(err)) - break - } - if usage == nil { - break - } - if usage.ApiEndpoint == "aws-rds" { - var requestBody entity.AwsRdsWastageRequest - var responseBody entity.AwsRdsWastageResponse - err = json.Unmarshal(usage.Request, &requestBody) - if err != nil { - s.logger.Error("failed to unmarshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - stats := model.Statistics{ - AccountID: requestBody.Identification["account"], - OrgEmail: requestBody.Identification["org_m_email"], - ResourceID: requestBody.Instance.HashedInstanceId, - } - - err = json.Unmarshal(usage.Response, &responseBody) - if err == nil { - recom := entity.RightsizingAwsRds{} - if responseBody.RightSizing.Recommended != nil { - recom = *responseBody.RightSizing.Recommended - } - stats.CurrentCost = responseBody.RightSizing.Current.Cost - stats.RecommendedCost = recom.Cost - stats.Savings = responseBody.RightSizing.Current.Cost - recom.Cost - stats.RDSInstanceCurrentCost = responseBody.RightSizing.Current.Cost - stats.RDSInstanceRecommendedCost = recom.Cost - stats.RDSInstanceSavings = responseBody.RightSizing.Current.Cost - recom.Cost - } - - out, err := json.Marshal(stats) - if err != nil { - s.logger.Error("failed to marshal stats", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - usage.Statistics = out - - err = s.usageRepo.Update(usage.ID, *usage) - if err != nil { - s.logger.Error("failed to update usage moved flag", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - } else { - var requestBody entity.EC2InstanceWastageRequest - var responseBody entity.EC2InstanceWastageResponse - err = json.Unmarshal(usage.Request, &requestBody) - if err != nil { - s.logger.Error("failed to unmarshal request body", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - stats := model.Statistics{ - AccountID: requestBody.Identification["account"], - OrgEmail: requestBody.Identification["org_m_email"], - ResourceID: requestBody.Instance.HashedInstanceId, - } - - err = json.Unmarshal(usage.Response, &responseBody) - if err == nil { - recom := entity.RightsizingEC2Instance{} - if responseBody.RightSizing.Recommended != nil { - recom = *responseBody.RightSizing.Recommended - } - - instanceCost := responseBody.RightSizing.Current.Cost - recomInstanceCost := recom.Cost - - volumeCurrentCost := 0.0 - volumeRecomCost := 0.0 - for _, v := range 
responseBody.VolumeRightSizing { - volumeCurrentCost += v.Current.Cost - if v.Recommended != nil { - volumeRecomCost += v.Recommended.Cost - } - } - - stats.CurrentCost = instanceCost + volumeCurrentCost - stats.RecommendedCost = recomInstanceCost + volumeRecomCost - stats.Savings = (instanceCost + volumeCurrentCost) - (recomInstanceCost + volumeRecomCost) - stats.EC2InstanceCurrentCost = instanceCost - stats.EC2InstanceRecommendedCost = recomInstanceCost - stats.EC2InstanceSavings = instanceCost - recomInstanceCost - stats.EBSCurrentCost = volumeCurrentCost - stats.EBSRecommendedCost = volumeRecomCost - stats.EBSSavings = volumeCurrentCost - volumeRecomCost - stats.EBSVolumeCount = len(responseBody.VolumeRightSizing) - } - - out, err := json.Marshal(stats) - if err != nil { - s.logger.Error("failed to marshal stats", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - usage.Statistics = out - - err = s.usageRepo.Update(usage.ID, *usage) - if err != nil { - s.logger.Error("failed to update usage moved flag", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - } - } - - }() - - return c.NoContent(http.StatusOK) -} - -func (s API) GetUsageIDByAccountID(echoCtx echo.Context) error { - accountId := echoCtx.QueryParam("accountID") - randomID := echoCtx.QueryParam("randomID") - auth0UserId := echoCtx.QueryParam("auth0UserId") - endpoint := echoCtx.QueryParam("endpoint") - - usage, err := s.usageRepo.GetByAccountID(endpoint, accountId, auth0UserId, randomID) - if err != nil { - return err - } - - return echoCtx.JSON(http.StatusOK, usage) -} - -func (s API) GetLastUsageIDByAccountID(echoCtx echo.Context) error { - accountId := echoCtx.QueryParam("accountID") - randomID := echoCtx.QueryParam("randomID") - auth0UserId := echoCtx.QueryParam("auth0UserId") - endpoint := echoCtx.QueryParam("endpoint") - groupByType := echoCtx.QueryParam("groupBy") - - usage, err := s.usageRepo.GetLastByAccountID(endpoint, accountId, auth0UserId, randomID, groupByType) - if err != nil { - return err - } - - return echoCtx.JSON(http.StatusOK, usage) -} - -func (s API) GetUsage(echoCtx echo.Context) error { - idStr := echoCtx.Param("id") - id, err := strconv.ParseUint(idStr, 10, 64) - if err != nil { - return err - } - - usage, err := s.usageRepo.Get(uint(id)) - if err != nil { - return err - } - - return echoCtx.JSON(http.StatusOK, usage) -} - -func (s API) FillRdsCosts(echoCtx echo.Context) error { - go func() { - //ctx := context.Background() - s.logger.Info("Filling RDS costs started") - - for { - usage, err := s.usageRepo.GetCostZero() - if err != nil { - s.logger.Error("error while getting null statistic usages list", zap.Error(err)) - break - } - if usage == nil { - break - } - if usage.ApiEndpoint == "aws-rds" { - var responseBody entity.AwsRdsWastageResponse - err = json.Unmarshal(usage.Response, &responseBody) - if err == nil { - responseBody.RightSizing.Current.Cost = responseBody.RightSizing.Current.ComputeCost + responseBody.RightSizing.Current.StorageCost - responseBody.RightSizing.Recommended.Cost = responseBody.RightSizing.Recommended.ComputeCost + responseBody.RightSizing.Recommended.StorageCost - } - - out, err := json.Marshal(responseBody) - if err != nil { - s.logger.Error("failed to marshal stats", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - usage.Response = out - - err = s.usageRepo.Update(usage.ID, *usage) - if err != nil { - s.logger.Error("failed to update usage moved flag", zap.Any("usage_id", usage.ID), zap.Error(err)) - continue - } - } - } - - }() - 
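Note: MigrateUsages, MigrateUsagesV2, and FillRdsCosts all share the same fire-and-forget shape: the HTTP handler spawns a goroutine and returns 200 immediately, while the goroutine drains one unprocessed row at a time until the repo query comes back nil. A minimal sketch of that loop under those assumptions; the row type and in-memory backlog are illustrative stand-ins for the usage repos.

// Minimal sketch of the fire-and-forget backfill loop shared by the removed
// MigrateUsages / MigrateUsagesV2 / FillRdsCosts handlers.
package main

import (
	"fmt"
	"log"
)

type row struct{ id int }

// next returns nil once the backlog is empty, mirroring the repo calls
// (GetRandomNotMoved, GetRandomNullStatistics, GetCostZero).
func next(backlog *[]row) *row {
	if len(*backlog) == 0 {
		return nil
	}
	r := (*backlog)[0]
	*backlog = (*backlog)[1:]
	return &r
}

// backfill returns immediately; the caller (an HTTP handler) responds 200
// without waiting. done is closed when the drain finishes.
func backfill(backlog []row, fix func(row) error) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			r := next(&backlog)
			if r == nil {
				break // backlog drained
			}
			if err := fix(*r); err != nil {
				log.Printf("row %d: %v", r.id, err)
				continue // skip bad rows, keep draining
			}
		}
	}()
	return done
}

func main() {
	done := backfill([]row{{1}, {2}, {3}}, func(r row) error {
		fmt.Println("fixed row", r.id)
		return nil
	})
	<-done // demo only; the real handler does not wait
}

One caveat visible in the removed handlers: rows whose stored request or response fails to decode are skipped with continue but never flagged as processed, so the random-row repo queries can hand the same row back on a later iteration.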
- return echoCtx.NoContent(http.StatusOK) -} - -func (s API) CreateUser(echoCtx echo.Context) error { - var user entity.User - err := echoCtx.Bind(&user) - if err != nil { - return err - } - - err = s.userRepo.Create(user.ToModel()) - if err != nil { - return err - } - - return echoCtx.JSON(http.StatusCreated, user) -} - -func (s API) UpdateUser(echoCtx echo.Context) error { - idString := echoCtx.Param("userId") - if idString == "" { - return errors.New("userId is required") - } - - premiumUntil, err := strconv.ParseInt(echoCtx.QueryParam("premiumUntil"), 10, 64) - if err != nil { - return err - } - - premiumUntilTime := time.UnixMilli(premiumUntil) - user := model.User{ - UserId: idString, - PremiumUntil: &premiumUntilTime, - } - err = s.userRepo.Update(idString, &user) - if err != nil { - return err - } - return echoCtx.JSON(http.StatusOK, user) -} - -func (s API) CreateOrganization(echoCtx echo.Context) error { - var org entity.Organization - err := echoCtx.Bind(&org) - if err != nil { - return err - } - - err = s.orgRepo.Create(org.ToModel()) - if err != nil { - return err - } - - return echoCtx.JSON(http.StatusCreated, org) -} - -func (s API) UpdateOrganization(echoCtx echo.Context) error { - idString := echoCtx.Param("organizationId") - if idString == "" { - return errors.New("organizationId is required") - } - - premiumUntil, err := strconv.ParseInt(echoCtx.QueryParam("premiumUntil"), 10, 64) - if err != nil { - return err - } - - premiumUntilTime := time.UnixMilli(premiumUntil) - org := model.Organization{ - OrganizationId: idString, - PremiumUntil: &premiumUntilTime, - } - err = s.orgRepo.Update(idString, &org) - if err != nil { - return err - } - return echoCtx.JSON(http.StatusOK, org) -} diff --git a/services/wastage/api/wastage/grpc-server/aws_plugin.go b/services/wastage/api/wastage/grpc-server/aws_plugin.go deleted file mode 100644 index def68c068..000000000 --- a/services/wastage/api/wastage/grpc-server/aws_plugin.go +++ /dev/null @@ -1,615 +0,0 @@ -package grpc_server - -import ( - "context" - "encoding/json" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/alitto/pond" - types2 "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/google/uuid" - "github.com/labstack/echo/v4" - "github.com/opengovern/og-util/pkg/httpserver" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/opengovernance/services/wastage/api/wastage/limit" - "github.com/opengovern/opengovernance/services/wastage/config" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - aws "github.com/opengovern/plugin-aws/plugin/proto/src/golang" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "golang.org/x/mod/semver" - "google.golang.org/grpc/metadata" - "math" - "net/http" - "strings" - "time" -) - -type awsPluginServer struct { - aws.OptimizationServer - - cfg config.WastageConfig - - tracer trace.Tracer - logger *zap.Logger - - blobClient *azblob.Client - blobWorkerPool *pond.WorkerPool - - usageRepo repo.UsageV2Repo - recomSvc *recommendation.Service - - limitService *limit.Service -} - -func newAwsPluginServer(logger *zap.Logger, cfg config.WastageConfig, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, - usageRepo 
repo.UsageV2Repo, recomSvc *recommendation.Service, limitService *limit.Service) *awsPluginServer { - - return &awsPluginServer{ - cfg: cfg, - tracer: otel.GetTracerProvider().Tracer("wastage.http.sources"), - logger: logger.Named("grpc"), - blobClient: blobClient, - blobWorkerPool: blobWorkerPool, - usageRepo: usageRepo, - recomSvc: recomSvc, - limitService: limitService, - } -} - -func (s *awsPluginServer) EC2InstanceOptimization(ctx context.Context, req *aws.EC2InstanceOptimizationRequest) (*aws.EC2InstanceOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp aws.EC2InstanceOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) == 0 { - return nil, fmt.Errorf("user not found") - } - userId = userIds[0] - - stats := model.Statistics{ - AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Instance.HashedInstanceId, - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - volMetrics := req.VolumeMetrics - req.Metrics = nil - req.VolumeMetrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - req.VolumeMetrics = volMetrics - - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("ec2-instance/%s.json", req.RequestId.String()), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "ec2-instance", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := aws.RightsizingEC2Instance{} - if resp.RightSizing != nil && resp.RightSizing.Recommended != nil { - recom = *resp.RightSizing.Recommended - } - - instanceCost := resp.RightSizing.Current.Cost - recomInstanceCost := recom.Cost - - volumeCurrentCost := 0.0 - volumeRecomCost := 0.0 - for _, v := range resp.VolumeRightSizing { - volumeCurrentCost += v.Current.Cost - if v.Recommended != nil { - volumeRecomCost += v.Recommended.Cost - } - } - - stats.CurrentCost = instanceCost + volumeCurrentCost - stats.RecommendedCost = recomInstanceCost + volumeRecomCost - stats.Savings = (instanceCost + volumeCurrentCost) - (recomInstanceCost + volumeRecomCost) - stats.EC2InstanceCurrentCost = instanceCost - stats.EC2InstanceRecommendedCost = recomInstanceCost - stats.EC2InstanceSavings = instanceCost - recomInstanceCost - stats.EBSCurrentCost = volumeCurrentCost - stats.EBSRecommendedCost = volumeRecomCost - stats.EBSSavings = volumeCurrentCost - 
volumeRecomCost - stats.EBSVolumeCount = len(resp.VolumeRightSizing) - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - - if req.Instance.State != string(types2.InstanceStateNameRunning) { - err = echo.NewHTTPError(http.StatusBadRequest, "instance is not running") - return nil, err - } - - if req.Loading { - return nil, nil - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+req.CliVersion.GetValue(), "v0.5.2") < 0 { - return nil, fmt.Errorf("plugin version is no longer supported - please update to the latest version") - } - - //ok, err = s.limitService.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - //if err != nil { - // s.logger.Error("failed to check profile limit", zap.Error(err)) - // return nil, err - //} - //if !ok { - // err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - // if err != nil { - // return nil, err - // } - //} - - //ok, err = s.limitService.CheckEC2InstanceLimit(ctx, userId, req.Identification["org_m_email"]) - //if err != nil { - // s.logger.Error("failed to check aws ec2 instance limit", zap.Error(err)) - // return nil, err - //} - //if !ok { - // err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "ec2 instance") - // if err != nil { - // return nil, err - // } - //} - - ec2RightSizingRecom, err := s.recomSvc.EC2InstanceRecommendationGrpc(ctx, req.Region, req.Instance, req.Volumes, req.Metrics, req.VolumeMetrics, req.Preferences, usageAverageType) - if err != nil { - err = fmt.Errorf("failed to get ec2 instance recommendation: %s", err.Error()) - return nil, err - } - - ebsRightSizingRecoms := make(map[string]*aws.EBSVolumeRecommendation) - for _, vol := range req.Volumes { - var ebsRightSizingRecom *aws.EBSVolumeRecommendation - ebsRightSizingRecom, err = s.recomSvc.EBSVolumeRecommendationGrpc(ctx, req.Region, vol, req.VolumeMetrics[vol.HashedVolumeId], req.Preferences, usageAverageType) - if err != nil { - err = fmt.Errorf("failed to get ebs volume %s recommendation: %s", vol.HashedVolumeId, err.Error()) - return nil, err - } - ebsRightSizingRecoms[vol.HashedVolumeId] = ebsRightSizingRecom - } - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - // DO NOT change this, resp is used in updating usage - resp = aws.EC2InstanceOptimizationResponse{ - RightSizing: ec2RightSizingRecom, - VolumeRightSizing: ebsRightSizingRecoms, - } - // DO NOT change this, resp is used in updating usage - return &resp, nil -} - -func (s *awsPluginServer) RDSInstanceOptimization(ctx context.Context, req *aws.RDSInstanceOptimizationRequest) (*aws.RDSInstanceOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) == 0 { - return nil, fmt.Errorf("user not found") - } - userId = userIds[0] - - var resp aws.RDSInstanceOptimizationResponse - var err error - - stats := model.Statistics{ 
- AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Instance.HashedInstanceId, - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("aws-rds/%s.json", req.RequestId.String()), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - usage := model.UsageV2{ - ApiEndpoint: "aws-rds", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := aws.RightsizingAwsRds{} - if resp.RightSizing.Recommended != nil { - recom = *resp.RightSizing.Recommended - } - stats.CurrentCost = resp.RightSizing.Current.Cost - stats.RecommendedCost = recom.Cost - stats.Savings = resp.RightSizing.Current.Cost - recom.Cost - stats.RDSInstanceCurrentCost = resp.RightSizing.Current.Cost - stats.RDSInstanceRecommendedCost = recom.Cost - stats.RDSInstanceSavings = resp.RightSizing.Current.Cost - recom.Cost - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+req.CliVersion.GetValue(), "v0.5.2") < 0 { - return nil, fmt.Errorf("plugin version is no longer supported - please update to the latest version") - } - - //ok, err = s.limitService.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - //if err != nil { - // s.logger.Error("failed to check profile limit", zap.Error(err)) - // return nil, err - //} - //if !ok { - // err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - // if err != nil { - // return nil, err - // } - //} - - ok, err = s.limitService.CheckRDSInstanceLimit(ctx, userId, req.Identification["org_m_email"]) - if err != nil { - s.logger.Error("failed to check aws rds instance limit", zap.Error(err)) - return nil, err - } - if !ok { - err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "rds instance") - if err != nil { - return nil, err - } - } - - rdsRightSizingRecom, err := s.recomSvc.AwsRdsRecommendationGrpc(ctx, req.Region, req.Instance, req.Metrics, req.Preferences, usageAverageType) - if err != nil { - s.logger.Error("failed to get aws rds recommendation", 
zap.Error(err)) - return nil, err - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - // DO NOT change this, resp is used in updating usage - resp = aws.RDSInstanceOptimizationResponse{ - RightSizing: rdsRightSizingRecom, - } - // DO NOT change this, resp is used in updating usage - return &resp, nil -} - -func (s *awsPluginServer) RDSClusterOptimization(ctx context.Context, req *aws.RDSClusterOptimizationRequest) (*aws.RDSClusterOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp aws.RDSClusterOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) == 0 { - return nil, fmt.Errorf("user not found") - } - userId = userIds[0] - - stats := model.Statistics{ - AccountID: req.Identification["account"], - OrgEmail: req.Identification["org_m_email"], - ResourceID: req.Cluster.HashedClusterId, - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("aws-rds-cluster/%s.json", req.RequestId.GetValue()), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - usage := model.UsageV2{ - ApiEndpoint: "aws-rds-cluster", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := entity.RightsizingAwsRds{} - for _, instance := range resp.RightSizing { - recom.Region = instance.Recommended.Region - recom.InstanceType = instance.Recommended.InstanceType - recom.Engine = instance.Recommended.Engine - recom.EngineVersion = instance.Recommended.EngineVersion - recom.ClusterType = entity.AwsRdsClusterType(instance.Recommended.ClusterType) - recom.VCPU += instance.Recommended.Vcpu - recom.MemoryGb += instance.Recommended.MemoryGb - recom.StorageType = recommendation.WrappedToString(instance.Recommended.StorageType) - recom.StorageSize = recommendation.WrappedToInt32(instance.Recommended.StorageSize) - recom.StorageIops = recommendation.WrappedToInt32(instance.Recommended.StorageIops) - recom.StorageThroughput = recommendation.WrappedToFloat64(instance.Recommended.StorageThroughput) - - recom.Cost += instance.Recommended.Cost - recom.ComputeCost += 
instance.Recommended.ComputeCost - recom.StorageCost += instance.Recommended.StorageCost - - stats.CurrentCost += instance.Current.Cost - stats.RDSInstanceCurrentCost += instance.Current.Cost - } - stats.Savings = stats.CurrentCost - recom.Cost - stats.RDSInstanceSavings = stats.CurrentCost - recom.Cost - stats.RecommendedCost = recom.Cost - stats.RDSInstanceRecommendedCost = recom.Cost - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - usageAverageType := recommendation.UsageAverageTypeMax - if req.CliVersion == nil || semver.Compare("v"+req.CliVersion.GetValue(), "v0.5.2") < 0 { - return nil, fmt.Errorf("plugin version is no longer supported - please update to the latest version") - } - - resp = aws.RDSClusterOptimizationResponse{ - RightSizing: make(map[string]*aws.RDSInstanceRightSizingRecommendation), - } - - //ok, err = s.limitService.CheckAccountsLimit(ctx, userId, req.Identification["org_m_email"], req.Identification["account"]) - //if err != nil { - // s.logger.Error("failed to check profile limit", zap.Error(err)) - // return nil, err - //} - //if !ok { - // err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "profile") - // if err != nil { - // return nil, err - // } - //} - - ok, err = s.limitService.CheckRDSClusterLimit(ctx, userId, req.Identification["org_m_email"]) - if err != nil { - s.logger.Error("failed to check aws rds cluster limit", zap.Error(err)) - return nil, err - } - if !ok { - err = s.limitService.CheckPremiumAndSendErr(ctx, userId, req.Identification["org_m_email"], "rds cluster") - if err != nil { - return nil, err - } - } - - var aggregatedInstance *aws.RDSInstance - var aggregatedMetrics map[string]*aws.Metric - for _, instance := range req.Instances { - instance := instance - rdsRightSizingRecom, err2 := s.recomSvc.AwsRdsRecommendationGrpc(ctx, req.Region, instance, req.Metrics[instance.HashedInstanceId].Metrics, req.Preferences, usageAverageType) - if err2 != nil { - s.logger.Error("failed to get aws rds recommendation", zap.Error(err)) - err = err2 - return nil, err - } - resp.RightSizing[instance.HashedInstanceId] = rdsRightSizingRecom - if aggregatedInstance == nil { - aggregatedInstance = instance - } - if aggregatedMetrics == nil { - aggregatedMetrics = req.Metrics[instance.HashedInstanceId].Metrics - } else { - for key, value := range req.Metrics[instance.HashedInstanceId].Metrics { - switch key { - case "FreeableMemory", "FreeStorageSpace": - aggregatedMetrics[key] = &aws.Metric{ - Metric: recommendation.MergeGrpcDatapoints(aggregatedMetrics[key].Metric, value.Metric, func(aa, bb float64) float64 { return math.Min(aa, bb) }), - } - default: - aggregatedMetrics[key] = &aws.Metric{ - Metric: recommendation.MergeGrpcDatapoints(aggregatedMetrics[key].Metric, value.Metric, func(aa, bb float64) float64 { return math.Max(aa, bb) }), - } - } - } - } - } - if aggregatedInstance == nil { - return nil, fmt.Errorf("no instances found in the request") - } - rdsClusterRightSizingRecom, err := s.recomSvc.AwsRdsRecommendationGrpc(ctx, req.Region, aggregatedInstance, aggregatedMetrics, req.Preferences, usageAverageType) - if err != nil { - s.logger.Error("failed to get aws rds recommendation", zap.Error(err)) - return nil, err - } - - if !strings.Contains(strings.ToLower(req.Cluster.Engine), "aurora") { - 
for k, instance := range resp.RightSizing { - instance := instance - instance.Recommended = rdsClusterRightSizingRecom.Recommended - instance.Description = rdsClusterRightSizingRecom.Description - resp.RightSizing[k] = instance - } - } else { - // TODO Handle aurora storage somehow - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - return &resp, nil -} diff --git a/services/wastage/api/wastage/grpc-server/gcp_plugin.go b/services/wastage/api/wastage/grpc-server/gcp_plugin.go deleted file mode 100644 index 86517e734..000000000 --- a/services/wastage/api/wastage/grpc-server/gcp_plugin.go +++ /dev/null @@ -1,190 +0,0 @@ -package grpc_server - -import ( - "context" - "encoding/json" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/alitto/pond" - "github.com/google/uuid" - "github.com/opengovern/og-util/pkg/httpserver" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/config" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - gcp "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "google.golang.org/grpc/metadata" - "time" -) - -type gcpPluginServer struct { - gcp.OptimizationServer - - cfg config.WastageConfig - - tracer trace.Tracer - logger *zap.Logger - - blobClient *azblob.Client - blobWorkerPool *pond.WorkerPool - - usageRepo repo.UsageV2Repo - recomSvc *recommendation.Service -} - -func newGcpPluginServer(logger *zap.Logger, cfg config.WastageConfig, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, usageRepo repo.UsageV2Repo, recomSvc *recommendation.Service) *gcpPluginServer { - return &gcpPluginServer{ - cfg: cfg, - tracer: otel.GetTracerProvider().Tracer("wastage.http.sources"), - logger: logger.Named("grpc"), - blobClient: blobClient, - blobWorkerPool: blobWorkerPool, - usageRepo: usageRepo, - recomSvc: recomSvc, - } -} - -func (s *gcpPluginServer) GCPComputeOptimization(ctx context.Context, req *gcp.GCPComputeOptimizationRequest) (*gcp.GCPComputeOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp gcp.GCPComputeOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) == 0 { - return nil, fmt.Errorf("user not found") - } - userId = userIds[0] - - stats := model.Statistics{ - AccountID: "", - OrgEmail: "", - ResourceID: req.Instance.Id, - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - diskMetrics := req.DisksMetrics - req.Metrics = nil - req.DisksMetrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - req.DisksMetrics = diskMetrics - - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil 
{ - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("gcp-compute-instance/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "gcp-compute-instance", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - recom := gcp.RightsizingGcpComputeInstance{} - if resp.Rightsizing != nil && resp.Rightsizing.Recommended != nil { - recom = *resp.Rightsizing.Recommended - } - stats.CurrentCost = resp.Rightsizing.Current.Cost - stats.RecommendedCost = recom.Cost - stats.Savings = resp.Rightsizing.Current.Cost - recom.Cost - stats.GCPComputeInstanceCurrentCost = resp.Rightsizing.Current.Cost - stats.GCPComputeInstanceRecommendedCost = recom.Cost - stats.GCPComputeInstanceSavings = resp.Rightsizing.Current.Cost - recom.Cost - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - rightSizingRecom, currentMachine, recomMachine, err := s.recomSvc.GCPComputeInstanceRecommendation(ctx, *req.Instance, req.Metrics, req.Preferences) - if err != nil { - s.logger.Error("failed to get gcp compute instance recommendation", zap.Error(err)) - return nil, err - } - - diskRightSizingRecoms := make(map[string]*gcp.GcpComputeDiskRecommendation) - for _, disk := range req.Disks { - var diskRightSizingRecom *gcp.GcpComputeDiskRecommendation - diskRightSizingRecom, err = s.recomSvc.GCPComputeDiskRecommendation(ctx, *disk, currentMachine, recomMachine, *req.DisksMetrics[disk.Id], req.Preferences) - if err != nil { - err = fmt.Errorf("failed to get GCP Compute Disk %s recommendation: %s", disk.Id, err.Error()) - return nil, err - } - diskRightSizingRecoms[disk.Id] = diskRightSizingRecom - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - - // DO NOT change this, resp is used in updating usage - resp = gcp.GCPComputeOptimizationResponse{ - Rightsizing: rightSizingRecom, - VolumesRightsizing: diskRightSizingRecoms, - } - // DO NOT change this, resp is used in updating usage - - return &resp, nil -} diff --git a/services/wastage/api/wastage/grpc-server/grpc.go b/services/wastage/api/wastage/grpc-server/grpc.go deleted file mode 100644 index 178c32ad6..000000000 --- a/services/wastage/api/wastage/grpc-server/grpc.go +++ /dev/null @@ -1,91 +0,0 @@ -package grpc_server - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/alitto/pond" - "github.com/google/uuid" - "github.com/opengovern/opengovernance/pkg/utils" - 
"github.com/opengovern/opengovernance/services/wastage/api/wastage/limit" - "github.com/opengovern/opengovernance/services/wastage/config" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - awsPluginProto "github.com/opengovern/plugin-aws/plugin/proto/src/golang" - gcpPluginProto "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - kubernetesPluginProto "github.com/opengovern/plugin-kubernetes-internal/plugin/proto/src/golang" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - "net" - "time" -) - -type Server struct { - logger *zap.Logger - kubernetesPluginServer *kubernetesPluginServer - gcpPluginServer *gcpPluginServer - awsPluginServer *awsPluginServer -} - -func NewServer(logger *zap.Logger, cfg config.WastageConfig, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, - usageRepo repo.UsageV2Repo, recomSvc *recommendation.Service, limitSvc *limit.Service) *Server { - kuberServer := newKubernetesPluginServer(logger, cfg, blobClient, blobWorkerPool, usageRepo, recomSvc) - gcpServer := newGcpPluginServer(logger, cfg, blobClient, blobWorkerPool, usageRepo, recomSvc) - awsServer := newAwsPluginServer(logger, cfg, blobClient, blobWorkerPool, usageRepo, recomSvc, limitSvc) - - svr := Server{ - logger: logger, - kubernetesPluginServer: kuberServer, - gcpPluginServer: gcpServer, - awsPluginServer: awsServer, - } - return &svr -} - -func Logger(logger *zap.Logger) func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - reqId := uuid.New() - - logger.Info("Request", zap.String("ReqID", reqId.String())) - startTime := time.Now() - resp, err := handler(ctx, req) - elapsed := time.Since(startTime).Seconds() - if err != nil { - logger.Error("Request failed", zap.String("ReqID", reqId.String()), zap.Error(err), zap.Float64("latency", elapsed)) - } else { - logger.Info("Request succeeded", zap.String("ReqID", reqId.String()), zap.Float64("latency", elapsed)) - } - - return resp, err - } -} - -func StartGrpcServer(server *Server, grpcServerAddress string, authGRPCURI string) error { - lis, err := net.Listen("tcp", grpcServerAddress) - if err != nil { - server.logger.Error("failed to listen", zap.Error(err)) - return err - } - - s := grpc.NewServer( - grpc.MaxRecvMsgSize(256*1024*1024), - grpc.ChainUnaryInterceptor(Logger(server.logger)), - grpc.ConnectionTimeout(10*time.Minute), - grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionIdle: 10 * time.Minute, - Time: 10 * time.Minute, - Timeout: 10 * time.Minute, - }), - ) - kubernetesPluginProto.RegisterOptimizationServer(s, server.kubernetesPluginServer) - gcpPluginProto.RegisterOptimizationServer(s, server.gcpPluginServer) - awsPluginProto.RegisterOptimizationServer(s, server.awsPluginServer) - server.logger.Info("server listening at", zap.String("address", lis.Addr().String())) - utils.EnsureRunGoroutine(func() { - if err = s.Serve(lis); err != nil { - server.logger.Error("failed to serve", zap.Error(err)) - panic(err) - } - }) - return nil -} diff --git a/services/wastage/api/wastage/grpc-server/kubernetes_plugin.go b/services/wastage/api/wastage/grpc-server/kubernetes_plugin.go deleted file mode 100644 index 7178a773b..000000000 --- a/services/wastage/api/wastage/grpc-server/kubernetes_plugin.go +++ /dev/null @@ -1,873 +0,0 @@ -package 
grpc_server - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/alitto/pond" - "github.com/google/uuid" - "github.com/opengovern/og-util/pkg/httpserver" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/config" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/opengovern/opengovernance/services/wastage/recommendation" - kubernetesPluginProto "github.com/opengovern/plugin-kubernetes-internal/plugin/proto/src/golang" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "google.golang.org/grpc/metadata" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -type kubernetesPluginServer struct { - kubernetesPluginProto.OptimizationServer - - cfg config.WastageConfig - - tracer trace.Tracer - logger *zap.Logger - - blobClient *azblob.Client - blobWorkerPool *pond.WorkerPool - - usageRepo repo.UsageV2Repo - recomSvc *recommendation.Service -} - -func newKubernetesPluginServer(logger *zap.Logger, cfg config.WastageConfig, blobClient *azblob.Client, blobWorkerPool *pond.WorkerPool, usageRepo repo.UsageV2Repo, recomSvc *recommendation.Service) *kubernetesPluginServer { - return &kubernetesPluginServer{ - cfg: cfg, - blobClient: blobClient, - blobWorkerPool: blobWorkerPool, - usageRepo: usageRepo, - recomSvc: recomSvc, - tracer: otel.GetTracerProvider().Tracer("wastage.grpc.kubernetes"), - logger: logger.Named("kubernetes-grpc-server"), - } -} - -func (s *kubernetesPluginServer) KubernetesPodOptimization(ctx context.Context, req *kubernetesPluginProto.KubernetesPodOptimizationRequest) (*kubernetesPluginProto.KubernetesPodOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp kubernetesPluginProto.KubernetesPodOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) == 0 { - return nil, fmt.Errorf("user not found") - } - userId = userIds[0] - - email := req.Identification["cluster_name"] - if !strings.Contains(email, "@") { - email = email + "@local.temp" - } - - accountId := req.Identification["auth_info_name"] - if accountId == "" { - accountId = req.Identification["cluster_server"] - } - stats := model.Statistics{ - AccountID: accountId, - OrgEmail: email, - ResourceID: req.Pod.Id, - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("kubernetes-pod/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - 
} - }) - - usage := model.UsageV2{ - ApiEndpoint: "kubernetes-pod", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - if req != nil && req.Pod != nil { - for _, container := range req.Pod.Containers { - if container == nil { - continue - } - stats.KubernetesCurrentCPURequest += container.CpuRequest - stats.KubernetesCurrentMemoryRequest += container.MemoryRequest - } - } - if resp.Rightsizing != nil { - for _, container := range resp.Rightsizing.ContainerResizing { - if container != nil && container.Current != nil && container.Recommended != nil { - stats.KubernetesRecommendedCPURequest += container.Recommended.CpuRequest - stats.KubernetesRecommendedMemoryRequest += container.Recommended.MemoryRequest - - stats.KubernetesCPURequestSavings += container.Current.CpuRequest - container.Recommended.CpuRequest - stats.KubernetesMemoryRequestSavings += container.Current.MemoryRequest - container.Recommended.MemoryRequest - } - } - } - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - podRightSizingRecom, err := s.recomSvc.KubernetesPodRecommendation(*req.Pod, req.Metrics, req.Preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - return nil, err - } - - // DO NOT change this, resp is used in updating usage - resp = kubernetesPluginProto.KubernetesPodOptimizationResponse{ - Rightsizing: podRightSizingRecom, - } - // DO NOT change this, resp is used in updating usage - - return &resp, nil -} - -func (s *kubernetesPluginServer) KubernetesDeploymentOptimization(ctx context.Context, req *kubernetesPluginProto.KubernetesDeploymentOptimizationRequest) (*kubernetesPluginProto.KubernetesDeploymentOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp kubernetesPluginProto.KubernetesDeploymentOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) > 0 { - userId = userIds[0] - } - - email := req.Identification["cluster_name"] - if !strings.Contains(email, "@") { - email = email + "@local.temp" - } - - accountId := req.Identification["auth_info_name"] - if accountId == "" { - accountId = req.Identification["cluster_server"] - } - stats := model.Statistics{ - AccountID: accountId, - OrgEmail: email, - ResourceID: req.GetDeployment().GetId(), - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - 
trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("kubernetes-deployment/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "kubernetes-deployment", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - if req != nil && req.Deployment != nil { - for _, container := range req.Deployment.Containers { - if container == nil { - continue - } - stats.KubernetesCurrentCPURequest += container.CpuRequest - stats.KubernetesCurrentMemoryRequest += container.MemoryRequest - } - } - if resp.Rightsizing != nil { - for _, container := range resp.Rightsizing.ContainerResizing { - if container != nil && container.Current != nil && container.Recommended != nil { - stats.KubernetesRecommendedCPURequest += container.Recommended.CpuRequest - stats.KubernetesRecommendedMemoryRequest += container.Recommended.MemoryRequest - - stats.KubernetesCPURequestSavings += container.Current.CpuRequest - container.Recommended.CpuRequest - stats.KubernetesMemoryRequestSavings += container.Current.MemoryRequest - container.Recommended.MemoryRequest - } - } - } - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - deploymentRightSizingRecom, err := s.recomSvc.KubernetesDeploymentRecommendation(*req.Deployment, req.Metrics, req.Preferences) - if err != nil { - s.logger.Error("failed to get kubernetes deployment recommendation", zap.Error(err)) - return nil, err - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - return nil, err - } - - // DO NOT change this, resp is used in updating usage - resp = kubernetesPluginProto.KubernetesDeploymentOptimizationResponse{ - Rightsizing: deploymentRightSizingRecom, - } - // DO NOT change this, resp is used in updating usage - - return &resp, nil -} - -func (s *kubernetesPluginServer) KubernetesStatefulsetOptimization(ctx context.Context, req *kubernetesPluginProto.KubernetesStatefulsetOptimizationRequest) (*kubernetesPluginProto.KubernetesStatefulsetOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp kubernetesPluginProto.KubernetesStatefulsetOptimizationResponse - var err error - - md, 
ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) > 0 { - userId = userIds[0] - } - - email := req.Identification["cluster_name"] - if !strings.Contains(email, "@") { - email = email + "@local.temp" - } - - accountId := req.Identification["auth_info_name"] - if accountId == "" { - accountId = req.Identification["cluster_server"] - } - stats := model.Statistics{ - AccountID: accountId, - OrgEmail: email, - ResourceID: req.GetStatefulset().GetId(), - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("kubernetes-statefulset/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "kubernetes-statefulset", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - if req != nil && req.Statefulset != nil { - for _, container := range req.Statefulset.Containers { - if container == nil { - continue - } - stats.KubernetesCurrentCPURequest += container.CpuRequest - stats.KubernetesCurrentMemoryRequest += container.MemoryRequest - } - } - if resp.Rightsizing != nil { - for _, container := range resp.Rightsizing.ContainerResizing { - if container != nil && container.Current != nil && container.Recommended != nil { - stats.KubernetesRecommendedCPURequest += container.Recommended.CpuRequest - stats.KubernetesRecommendedMemoryRequest += container.Recommended.MemoryRequest - - stats.KubernetesCPURequestSavings += container.Current.CpuRequest - container.Recommended.CpuRequest - stats.KubernetesMemoryRequestSavings += container.Current.MemoryRequest - container.Recommended.MemoryRequest - } - } - } - - statsOut, _ := json.Marshal(stats) - usage.Statistics = statsOut - } - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - } - }() - if req.Loading { - return nil, nil - } - - statefulsetRightSizingRecom, err := s.recomSvc.KubernetesStatefulsetRecommendation(*req.Statefulset, req.Metrics, req.Preferences) - if err != nil { - s.logger.Error("failed to get kubernetes statefulset recommendation", zap.Error(err)) - return nil, err - } - - elapsed := time.Since(start).Seconds() - usage.Latency = &elapsed - err = s.usageRepo.Update(usage.ID, usage) - if err != nil { - 
s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage)) - return nil, err - } - - // DO NOT change this, resp is used in updating usage - resp = kubernetesPluginProto.KubernetesStatefulsetOptimizationResponse{ - Rightsizing: statefulsetRightSizingRecom, - } - // DO NOT change this, resp is used in updating usage - - return &resp, nil -} - -func (s *kubernetesPluginServer) KubernetesDaemonsetOptimization(ctx context.Context, req *kubernetesPluginProto.KubernetesDaemonsetOptimizationRequest) (*kubernetesPluginProto.KubernetesDaemonsetOptimizationResponse, error) { - start := time.Now() - ctx, span := s.tracer.Start(ctx, "get") - defer span.End() - - var resp kubernetesPluginProto.KubernetesDaemonsetOptimizationResponse - var err error - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get incoming context") - } - - userIds := md.Get(httpserver.XPlatformUserIDHeader) - userId := "" - if len(userIds) > 0 { - userId = userIds[0] - } - - email := req.Identification["cluster_name"] - if !strings.Contains(email, "@") { - email = email + "@local.temp" - } - - accountId := req.Identification["auth_info_name"] - if accountId == "" { - accountId = req.Identification["cluster_server"] - } - stats := model.Statistics{ - AccountID: accountId, - OrgEmail: email, - ResourceID: req.GetDaemonset().GetId(), - Auth0UserId: userId, - } - statsOut, _ := json.Marshal(stats) - - fullReqJson, _ := json.Marshal(req) - metrics := req.Metrics - req.Metrics = nil - trimmedReqJson, _ := json.Marshal(req) - req.Metrics = metrics - var requestId *string - var cliVersion *string - if req.RequestId != nil { - requestId = &req.RequestId.Value - } - if req.CliVersion != nil { - cliVersion = &req.CliVersion.Value - } - - if requestId == nil { - id := uuid.New().String() - requestId = &id - } - - s.blobWorkerPool.Submit(func() { - _, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("kubernetes-daemonset/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)}) - if err != nil { - s.logger.Error("failed to upload usage to blob storage", zap.Error(err)) - } - }) - - usage := model.UsageV2{ - ApiEndpoint: "kubernetes-daemonset", - Request: trimmedReqJson, - RequestId: requestId, - CliVersion: cliVersion, - Response: nil, - FailureMessage: nil, - Statistics: statsOut, - } - err = s.usageRepo.Create(&usage) - if err != nil { - s.logger.Error("failed to create usage", zap.Error(err)) - return nil, err - } - - defer func() { - if err != nil { - fmsg := err.Error() - usage.FailureMessage = &fmsg - } else { - usage.Response, _ = json.Marshal(resp) - id := uuid.New() - responseId := id.String() - usage.ResponseId = &responseId - - if req != nil && req.Daemonset != nil { - for _, container := range req.Daemonset.Containers { - if container == nil { - continue - } - stats.KubernetesCurrentCPURequest += container.CpuRequest - stats.KubernetesCurrentMemoryRequest += container.MemoryRequest - } - } - if resp.Rightsizing != nil { - for _, container := range resp.Rightsizing.ContainerResizing { - if container != nil && container.Current != nil && container.Recommended != nil { - stats.KubernetesRecommendedCPURequest += container.Recommended.CpuRequest - stats.KubernetesRecommendedMemoryRequest += container.Recommended.MemoryRequest - - stats.KubernetesCPURequestSavings += container.Current.CpuRequest - container.Recommended.CpuRequest - stats.KubernetesMemoryRequestSavings += 
-					}
-				}
-			}
-
-			statsOut, _ := json.Marshal(stats)
-			usage.Statistics = statsOut
-		}
-		err = s.usageRepo.Update(usage.ID, usage)
-		if err != nil {
-			s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		}
-	}()
-	if req.Loading {
-		return nil, nil
-	}
-
-	daemonsetRightSizingRecom, err := s.recomSvc.KubernetesDaemonsetRecommendation(*req.Daemonset, req.Metrics, req.Preferences)
-	if err != nil {
-		s.logger.Error("failed to get kubernetes daemonset recommendation", zap.Error(err))
-		return nil, err
-	}
-
-	elapsed := time.Since(start).Seconds()
-	usage.Latency = &elapsed
-	err = s.usageRepo.Update(usage.ID, usage)
-	if err != nil {
-		s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		return nil, err
-	}
-
-	// DO NOT change this, resp is used in updating usage
-	resp = kubernetesPluginProto.KubernetesDaemonsetOptimizationResponse{
-		Rightsizing: daemonsetRightSizingRecom,
-	}
-	// DO NOT change this, resp is used in updating usage
-
-	return &resp, nil
-}
-
-func (s *kubernetesPluginServer) KubernetesJobOptimization(ctx context.Context, req *kubernetesPluginProto.KubernetesJobOptimizationRequest) (*kubernetesPluginProto.KubernetesJobOptimizationResponse, error) {
-	start := time.Now()
-	ctx, span := s.tracer.Start(ctx, "get")
-	defer span.End()
-
-	var resp kubernetesPluginProto.KubernetesJobOptimizationResponse
-	var err error
-
-	md, ok := metadata.FromIncomingContext(ctx)
-	if !ok {
-		return nil, fmt.Errorf("failed to get incoming context")
-	}
-
-	userIds := md.Get(httpserver.XPlatformUserIDHeader)
-	userId := ""
-	if len(userIds) > 0 {
-		userId = userIds[0]
-	}
-
-	email := req.Identification["cluster_name"]
-	if !strings.Contains(email, "@") {
-		email = email + "@local.temp"
-	}
-
-	accountId := req.Identification["auth_info_name"]
-	if accountId == "" {
-		accountId = req.Identification["cluster_server"]
-	}
-	stats := model.Statistics{
-		AccountID: accountId,
-		OrgEmail: email,
-		ResourceID: req.GetJob().GetId(),
-		Auth0UserId: userId,
-	}
-	statsOut, _ := json.Marshal(stats)
-
-	fullReqJson, _ := json.Marshal(req)
-	metrics := req.Metrics
-	req.Metrics = nil
-	trimmedReqJson, _ := json.Marshal(req)
-	req.Metrics = metrics
-	var requestId *string
-	var cliVersion *string
-	if req.RequestId != nil {
-		requestId = &req.RequestId.Value
-	}
-	if req.CliVersion != nil {
-		cliVersion = &req.CliVersion.Value
-	}
-
-	if requestId == nil {
-		id := uuid.New().String()
-		requestId = &id
-	}
-
-	s.blobWorkerPool.Submit(func() {
-		_, err = s.blobClient.UploadBuffer(context.Background(), s.cfg.AzBlob.Container, fmt.Sprintf("kubernetes-job/%s.json", *requestId), fullReqJson, &azblob.UploadBufferOptions{AccessTier: utils.GetPointer(blob.AccessTierCold)})
-		if err != nil {
-			s.logger.Error("failed to upload usage to blob storage", zap.Error(err))
-		}
-	})
-
-	usage := model.UsageV2{
-		ApiEndpoint: "kubernetes-job",
-		Request: trimmedReqJson,
-		RequestId: requestId,
-		CliVersion: cliVersion,
-		Response: nil,
-		FailureMessage: nil,
-		Statistics: statsOut,
-	}
-	err = s.usageRepo.Create(&usage)
-	if err != nil {
-		s.logger.Error("failed to create usage", zap.Error(err))
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			fmsg := err.Error()
-			usage.FailureMessage = &fmsg
-		} else {
-			usage.Response, _ = json.Marshal(resp)
-			id := uuid.New()
-			responseId := id.String()
-			usage.ResponseId = &responseId
-
-			if req != nil && req.Job != nil {
-				for _, container := range req.Job.Containers {
-					if container == nil {
-						continue
-					}
-					stats.KubernetesCurrentCPURequest += container.CpuRequest
-					stats.KubernetesCurrentMemoryRequest += container.MemoryRequest
-				}
-			}
-			if resp.Rightsizing != nil {
-				for _, container := range resp.Rightsizing.ContainerResizing {
-					if container != nil && container.Current != nil && container.Recommended != nil {
-						stats.KubernetesRecommendedCPURequest += container.Recommended.CpuRequest
-						stats.KubernetesRecommendedMemoryRequest += container.Recommended.MemoryRequest
-
-						stats.KubernetesCPURequestSavings += container.Current.CpuRequest - container.Recommended.CpuRequest
-						stats.KubernetesMemoryRequestSavings += container.Current.MemoryRequest - container.Recommended.MemoryRequest
-					}
-				}
-			}
-
-			statsOut, _ := json.Marshal(stats)
-			usage.Statistics = statsOut
-		}
-		err = s.usageRepo.Update(usage.ID, usage)
-		if err != nil {
-			s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		}
-	}()
-	if req.Loading {
-		return nil, nil
-	}
-
-	jobRightSizingRecom, err := s.recomSvc.KubernetesJobRecommendation(*req.Job, req.Metrics, req.Preferences)
-	if err != nil {
-		s.logger.Error("failed to get kubernetes daemonset recommendation", zap.Error(err))
-		return nil, err
-	}
-
-	elapsed := time.Since(start).Seconds()
-	usage.Latency = &elapsed
-	err = s.usageRepo.Update(usage.ID, usage)
-	if err != nil {
-		s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		return nil, err
-	}
-
-	// DO NOT change this, resp is used in updating usage
-	resp = kubernetesPluginProto.KubernetesJobOptimizationResponse{
-		Rightsizing: jobRightSizingRecom,
-	}
-	// DO NOT change this, resp is used in updating usage
-
-	return &resp, nil
-}
-
-func (s *kubernetesPluginServer) KubernetesNodeGetCost(ctx context.Context, req *kubernetesPluginProto.KubernetesNodeGetCostRequest) (*kubernetesPluginProto.KubernetesNodeGetCostResponse, error) {
-	start := time.Now()
-	ctx, span := s.tracer.Start(ctx, "get")
-	defer span.End()
-
-	var resp kubernetesPluginProto.KubernetesNodeGetCostResponse
-	var err error
-
-	md, ok := metadata.FromIncomingContext(ctx)
-	if !ok {
-		return nil, fmt.Errorf("failed to get incoming context")
-	}
-
-	userIds := md.Get(httpserver.XPlatformUserIDHeader)
-	userId := ""
-	if len(userIds) > 0 {
-		userId = userIds[0]
-	}
-
-	email := req.Identification["cluster_name"]
-	if !strings.Contains(email, "@") {
-		email = email + "@local.temp"
-	}
-
-	accountId := req.Identification["auth_info_name"]
-	if accountId == "" {
-		accountId = req.Identification["cluster_server"]
-	}
-	stats := model.Statistics{
-		AccountID: accountId,
-		OrgEmail: email,
-		ResourceID: req.GetNode().GetId(),
-		Auth0UserId: userId,
-	}
-	statsOut, _ := json.Marshal(stats)
-
-	reqJson, _ := json.Marshal(req)
-	var requestId *string
-	var cliVersion *string
-	if req.RequestId != nil {
-		requestId = &req.RequestId.Value
-	}
-	if req.CliVersion != nil {
-		cliVersion = &req.CliVersion.Value
-	}
-
-	if requestId == nil {
-		id := uuid.New().String()
-		requestId = &id
-	}
-
-	usage := model.UsageV2{
-		ApiEndpoint: "kubernetes-node",
-		Request: reqJson,
-		RequestId: requestId,
-		CliVersion: cliVersion,
-		Response: nil,
-		FailureMessage: nil,
-		Statistics: statsOut,
-	}
-	err = s.usageRepo.Create(&usage)
-	if err != nil {
-		s.logger.Error("failed to create usage", zap.Error(err))
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			fmsg := err.Error()
-			usage.FailureMessage = &fmsg
-		} else {
-			usage.Response, _ = json.Marshal(resp)
-			id := uuid.New()
-			responseId := id.String()
-			usage.ResponseId = &responseId
-		}
-		err = s.usageRepo.Update(usage.ID, usage)
-		if err != nil {
-			s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		}
-	}()
-
-	nodeCost, err := s.recomSvc.KubernetesNodeCost(ctx, *req.Node)
-	if err != nil {
-		s.logger.Error("failed to get kubernetes node cost", zap.Error(err))
-		return nil, err
-	}
-
-	elapsed := time.Since(start).Seconds()
-	usage.Latency = &elapsed
-	err = s.usageRepo.Update(usage.ID, usage)
-	if err != nil {
-		s.logger.Error("failed to update usage", zap.Error(err), zap.Any("usage", usage))
-		return nil, err
-	}
-
-	// DO NOT change this, resp is used in updating usage
-	resp = kubernetesPluginProto.KubernetesNodeGetCostResponse{
-		Cost: wrapperspb.Double(nodeCost),
-	}
-
-	return &resp, nil
-}
diff --git a/services/wastage/api/wastage/limit/consts.go b/services/wastage/api/wastage/limit/consts.go
deleted file mode 100644
index d2c7513b9..000000000
--- a/services/wastage/api/wastage/limit/consts.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package limit
-
-const (
-	UserEC2InstanceLimit = int32(500)
-	UserEBSVolumeLimit = int32(500)
-	UserRDSInstanceLimit = int32(100)
-	UserRDSClusterLimit = int32(50)
-	UserAccountLimit = int32(5)
-
-	OrgEC2InstanceLimit = int32(2000)
-	OrgEBSVolumeLimit = int32(2000)
-	OrgRDSInstanceLimit = int32(1000)
-	OrgRDSClusterLimit = int32(500)
-	OrgAccountLimit = int32(5)
-)
diff --git a/services/wastage/api/wastage/limit/limit.go b/services/wastage/api/wastage/limit/limit.go
deleted file mode 100644
index 28635a9be..000000000
--- a/services/wastage/api/wastage/limit/limit.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package limit
-
-import (
-	"context"
-	"fmt"
-	"github.com/opengovern/opengovernance/services/wastage/db/repo"
-	"go.uber.org/zap"
-	"strings"
-	"time"
-)
-
-type Service struct {
-	logger *zap.Logger
-	userRepo repo.UserRepo
-	orgRepo repo.OrganizationRepo
-	usageRepo repo.UsageV2Repo
-}
-
-func NewLimitService(logger *zap.Logger, userRepo repo.UserRepo, orgRepo repo.OrganizationRepo, usageRepo repo.UsageV2Repo) *Service {
-	return &Service{
-		logger: logger,
-		userRepo: userRepo,
-		orgRepo: orgRepo,
-		usageRepo: usageRepo,
-	}
-}
-
-func (s *Service) CheckRDSInstanceLimit(ctx context.Context, auth0UserId, orgEmail string) (bool, error) {
-	s.logger.Info("Checking RDS Instance limit", zap.String("auth0UserId", auth0UserId), zap.String("orgEmail", orgEmail))
-	if orgEmail != "" && strings.Contains(orgEmail, "@") {
-		org := strings.Split(orgEmail, "@")
-		if org[1] != "" {
-			orgCount, err := s.usageRepo.GetRDSInstanceOptimizationsCountForOrg(ctx, org[1])
-			if err != nil {
-				return false, err
-			}
-			if orgCount < int64(OrgRDSInstanceLimit) {
-				return true, nil
-			}
-			s.logger.Info("Org RDS Instance limit reached", zap.String("orgEmail", org[1]))
-		}
-	}
-	userCount, err := s.usageRepo.GetRDSInstanceOptimizationsCountForUser(ctx, auth0UserId)
-	if err != nil {
-		return false, err
-	}
-	if userCount < int64(UserRDSInstanceLimit) {
-		return true, nil
-	}
-	s.logger.Info("User RDS Instance limit reached", zap.String("auth0UserId", auth0UserId))
-	return false, nil
-}
-
-func (s *Service) CheckRDSClusterLimit(ctx context.Context, auth0UserId, orgEmail string) (bool, error) {
-	s.logger.Info("Checking RDS Cluster limit", zap.String("auth0UserId", auth0UserId), zap.String("orgEmail", orgEmail))
-	if orgEmail != "" && strings.Contains(orgEmail, "@") {
-		org := strings.Split(orgEmail, "@")
-		if org[1] != "" {
-			orgCount, err := s.usageRepo.GetRDSClusterOptimizationsCountForOrg(ctx, org[1])
-			if err != nil {
-				return false, err
-			}
-			if orgCount < int64(OrgRDSClusterLimit) {
-				return true, nil
-			}
-			s.logger.Info("Org RDS Cluster limit reached", zap.String("orgEmail", org[1]))
-		}
-	}
-	userCount, err := s.usageRepo.GetRDSClusterOptimizationsCountForUser(ctx, auth0UserId)
-	if err != nil {
-		return false, err
-	}
-	if userCount < int64(UserRDSClusterLimit) {
-		return true, nil
-	}
-	s.logger.Info("User RDS Cluster limit reached", zap.String("auth0UserId", auth0UserId))
-	return false, nil
-}
-
-func (s *Service) CheckPremiumAndSendErr(ctx context.Context, userId string, orgEmail string, service string) error {
-	user, err := s.userRepo.Get(ctx, userId)
-	if err != nil {
-		s.logger.Error("failed to get user", zap.Error(err))
-		return err
-	}
-	if user != nil && user.PremiumUntil != nil {
-		if time.Now().Before(*user.PremiumUntil) {
-			return nil
-		}
-	}
-
-	if orgEmail != "" && strings.Contains(orgEmail, "@") {
-		org := strings.Split(orgEmail, "@")
-		if org[1] != "" {
-			orgName := strings.Split(orgEmail, "@")
-			org, err := s.orgRepo.Get(ctx, orgName[1])
-			if err != nil {
-				s.logger.Error("failed to get organization", zap.Error(err))
-				return err
-			}
-			if org != nil && org.PremiumUntil != nil {
-				if time.Now().Before(*org.PremiumUntil) {
-					return nil
-				}
-			}
-		}
-	}
-
-	err = fmt.Errorf("reached the %s limit for both user and organization", service)
-	s.logger.Error(err.Error(), zap.String("auth0UserId", userId), zap.String("orgEmail", orgEmail))
-	return nil
-}
-
-func (s *Service) CheckEC2InstanceLimit(ctx context.Context, auth0UserId, orgEmail string) (bool, error) {
-	s.logger.Info("Checking EC2 Instance limit", zap.String("auth0UserId", auth0UserId), zap.String("orgEmail", orgEmail))
-	if orgEmail != "" && strings.Contains(orgEmail, "@") {
-		org := strings.Split(orgEmail, "@")
-		if org[1] != "" {
-			orgCount, err := s.usageRepo.GetEC2InstanceOptimizationsCountForOrg(ctx, org[1])
-			if err != nil {
-				return false, err
-			}
-			if orgCount < int64(OrgEC2InstanceLimit) {
-				return true, nil
-			}
-			s.logger.Info("Org EC2 Instance limit reached", zap.String("orgEmail", org[1]))
-		}
-	}
-	userCount, err := s.usageRepo.GetEC2InstanceOptimizationsCountForUser(ctx, auth0UserId)
-	if err != nil {
-		return false, err
-	}
-	if userCount < int64(UserEC2InstanceLimit) {
-		return true, nil
-	}
-	s.logger.Info("User EC2 Instance limit reached", zap.String("auth0UserId", auth0UserId))
-	return false, nil
-}
-
-func (s *Service) CheckAccountsLimit(ctx context.Context, auth0UserId, orgEmail, account string) (bool, error) {
-	s.logger.Info("Checking account limit", zap.String("auth0UserId", auth0UserId), zap.String("orgEmail", orgEmail), zap.String("account", account))
-	if orgEmail != "" && strings.Contains(orgEmail, "@") {
-		org := strings.Split(orgEmail, "@")
-		if org[1] != "" {
-			orgAccounts, err := s.usageRepo.GetAccountsForOrg(ctx, org[1])
-			if err != nil {
-				return false, err
-			}
-			if len(orgAccounts) < int(OrgAccountLimit) {
-				return true, nil
-			} else if checkAccountInList(account, orgAccounts) {
-				return true, nil
-			}
-			s.logger.Info("Org Account limit reached", zap.String("orgEmail", org[1]))
-		}
-	}
-	userAccounts, err := s.usageRepo.GetAccountsForUser(ctx, auth0UserId)
-	if err != nil {
-		return false, err
-	}
-	if len(userAccounts) < int(UserAccountLimit) {
-		return true, nil
-	} else if checkAccountInList(account, userAccounts) {
-		return true, nil
-	}
-	s.logger.Info("User Account limit reached", zap.String("auth0UserId", auth0UserId))
-	return false, nil
-}
-
-func checkAccountInList(acc string, accounts []string) bool {
-	for _, account := range accounts {
-		if acc == account {
-			return true
-		}
-	}
-	return false
-}
diff --git a/services/wastage/cmd.go b/services/wastage/cmd.go
deleted file mode 100644
index c4795b305..000000000
--- a/services/wastage/cmd.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package wastage
-
-import (
-	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
-	"github.com/alitto/pond"
-	"github.com/opengovern/og-util/pkg/httpserver"
-	"github.com/opengovern/og-util/pkg/koanf"
-	"github.com/opengovern/opengovernance/services/wastage/api"
-	grpc_server "github.com/opengovern/opengovernance/services/wastage/api/wastage/grpc-server"
-	"github.com/opengovern/opengovernance/services/wastage/api/wastage/limit"
-	"github.com/opengovern/opengovernance/services/wastage/config"
-	"github.com/opengovern/opengovernance/services/wastage/cost"
-	"github.com/opengovern/opengovernance/services/wastage/db/connector"
-	"github.com/opengovern/opengovernance/services/wastage/db/model"
-	"github.com/opengovern/opengovernance/services/wastage/db/repo"
-	"github.com/opengovern/opengovernance/services/wastage/ingestion"
-	"github.com/opengovern/opengovernance/services/wastage/recommendation"
-	"github.com/spf13/cobra"
-	"go.uber.org/zap"
-	logger2 "gorm.io/gorm/logger"
-	"os"
-	"time"
-)
-
-var (
-	AuthGRPCURI = os.Getenv("AUTH_GRPC_URI")
-	GCPProjectID = os.Getenv("GCP_PROJECT_ID")
-	GCPPrivateKey = os.Getenv("GCP_PRIVATE_KEY")
-	GCPClientEmail = os.Getenv("GCP_CLIENT_EMAIL")
-)
-
-func Command() *cobra.Command {
-	cnf := koanf.Provide("wastage", config.WastageConfig{})
-
-	cmd := &cobra.Command{
-		RunE: func(cmd *cobra.Command, _ []string) error {
-			ctx := cmd.Context()
-			logger, err := zap.NewProduction()
-			if err != nil {
-				return err
-			}
-
-			logger = logger.Named("wastage")
-
-			cmd.SilenceUsage = true
-
-			db, err := connector.New(cnf.Postgres, logger, logger2.Info)
-			if err != nil {
-				return err
-			}
-			usageDb, err := connector.New(cnf.Postgres, logger, logger2.Warn)
-			if err != nil {
-				return err
-			}
-			// create citext extension if not exists
-			err = db.Conn().Exec("CREATE EXTENSION IF NOT EXISTS citext").Error
-			if err != nil {
-				logger.Error("failed to create citext extension", zap.Error(err))
-				return err
-			}
-			err = db.Conn().AutoMigrate(&model.DataAge{}, &model.Usage{}, &model.User{}, &model.Organization{})
-
-			err = usageDb.Conn().AutoMigrate(&model.Usage{}, &model.UsageV2{})
-			if err != nil {
-				logger.Error("failed to auto migrate", zap.Error(err))
-				return err
-			}
-			ec2InstanceRepo := repo.NewEC2InstanceTypeRepo(db)
-			rdsRepo := repo.NewRDSProductRepo(db)
-			rdsInstanceRepo := repo.NewRDSDBInstanceRepo(db)
-			rdsStorageRepo := repo.NewRDSDBStorageRepo(logger, db)
-			ebsVolumeRepo := repo.NewEBSVolumeTypeRepo(db)
-			computeMachineTypeRepo := repo.NewGCPComputeMachineTypeRepo(db)
-			computeDiskTypeRepo := repo.NewGCPComputeDiskTypeRepo(db)
-			computeSKURepo := repo.NewGCPComputeSKURepo(db)
-			dataAgeRepo := repo.NewDataAgeRepo(db)
-			usageV2Repo := repo.NewUsageV2Repo(usageDb)
-			usageV1Repo := repo.NewUsageRepo(usageDb)
-			userRepo := repo.NewUserRepo(db)
-			orgRepo := repo.NewOrganizationRepo(db)
-			costSvc := cost.New(cnf.Pennywise.BaseURL)
-
-			cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{TenantID: cnf.AzBlob.TenantID})
-			if err != nil {
-				logger.Error("failed to create azure credential", zap.Error(err))
-				return err
-			}
-
-			blobClient, err := azblob.NewClient(cnf.AzBlob.AccountUrl, cred, nil)
-			if err != nil {
-				logger.Error("failed to create blob client", zap.Error(err))
-				return err
-			}
-
-			recomSvc := recommendation.New(logger, ec2InstanceRepo, ebsVolumeRepo, rdsInstanceRepo, rdsStorageRepo, computeMachineTypeRepo, computeDiskTypeRepo, computeSKURepo, cnf.OpenAIToken, costSvc)
-			ingestionSvc := ingestion.New(logger, db, ec2InstanceRepo, rdsRepo, rdsInstanceRepo, rdsStorageRepo, ebsVolumeRepo, dataAgeRepo)
-
-			gcpCredentials := map[string]string{
-				"type": "service_account",
-				"project_id": GCPProjectID,
-				"private_key": GCPPrivateKey,
-				"client_email": GCPClientEmail,
-			}
-			gcpIngestionSvc, err := ingestion.NewGcpService(ctx, logger, dataAgeRepo, computeMachineTypeRepo, computeDiskTypeRepo, computeSKURepo, db, gcpCredentials, GCPProjectID)
-			go ingestionSvc.Start(ctx)
-			go gcpIngestionSvc.Start(ctx)
-
-			blobWorkerPool := pond.New(50, 1000000,
-				pond.Strategy(pond.Eager()),
-				pond.Context(ctx),
-				pond.IdleTimeout(10*time.Second),
-				pond.MinWorkers(1))
-
-			limitSvc := limit.NewLimitService(logger, userRepo, orgRepo, usageV2Repo)
-
-			grpcServer := grpc_server.NewServer(logger, cnf, blobClient, blobWorkerPool, usageV2Repo, recomSvc, limitSvc)
-			err = grpc_server.StartGrpcServer(grpcServer, cnf.Grpc.Address, AuthGRPCURI)
-			if err != nil {
-				return err
-			}
-
-			return httpserver.RegisterAndStart(
-				ctx,
-				logger,
-				cnf.Http.Address,
-				api.New(cnf, logger, blobClient, blobWorkerPool, costSvc, recomSvc, ingestionSvc, limitSvc, usageV1Repo, usageV2Repo, userRepo, orgRepo),
-			)
-		},
-	}
-
-	return cmd
-}
diff --git a/services/wastage/config/service.go b/services/wastage/config/service.go
deleted file mode 100644
index 23d5f5460..000000000
--- a/services/wastage/config/service.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package config
-
-import "github.com/opengovern/og-util/pkg/koanf"
-
-type AzBlobConfig struct {
-	TenantID string `json:"tenantId" koanf:"tenant_id"`
-	AccountUrl string `json:"accountUrl" koanf:"account_url"`
-	Container string `json:"container" koanf:"container"`
-}
-
-type WastageConfig struct {
-	Postgres koanf.Postgres `json:"postgres,omitempty" koanf:"postgres"`
-	Http koanf.HttpServer `json:"http,omitempty" koanf:"http"`
-	Grpc koanf.GrpcServer `json:"grpc,omitempty" koanf:"grpc"`
-	Pennywise koanf.OpenGovernanceService `json:"pennywise" koanf:"pennywise"`
-	OpenAIToken string `json:"openAIToken" koanf:"openai_token"`
-	AzBlob AzBlobConfig `json:"azBlob" koanf:"az_blob"`
-}
diff --git a/services/wastage/cost/aws_rds.go b/services/wastage/cost/aws_rds.go
deleted file mode 100644
index 409568ff3..000000000
--- a/services/wastage/cost/aws_rds.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package cost
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
-	"github.com/kaytu-io/pennywise/pkg/cost"
-	"github.com/kaytu-io/pennywise/pkg/schema"
-	"github.com/opengovern/og-util/pkg/httpclient"
-	og_client "github.com/opengovern/opengovernance/pkg/steampipe-plugin-opengovernance/opengovernance-client"
-	"github.com/opengovern/opengovernance/services/wastage/api/entity"
-	"net/http"
-	"time"
-)
-
-func (s *Service) GetRDSInstanceCost(ctx context.Context, region string, rdsInstance entity.AwsRds, metrics map[string][]types2.Datapoint) (float64, map[string]float64, error) {
-	req := schema.Submission{
-		ID: "submission-1",
-		CreatedAt: time.Now(),
-		Resources: []schema.ResourceDef{},
-	}
-
-	valuesMap := map[string]any{}
-	valuesMap["region"] = region
valuesMap["instance_class"] = rdsInstance.InstanceType - valuesMap["availability_zone"] = rdsInstance.AvailabilityZone - valuesMap["engine"] = rdsInstance.Engine - valuesMap["engine_version"] = rdsInstance.EngineVersion - valuesMap["license_model"] = rdsInstance.LicenseModel - if rdsInstance.ClusterType == entity.AwsRdsClusterTypeSingleInstance { - valuesMap["multi_az"] = false - } else { - valuesMap["multi_az"] = true - } - valuesMap["cluster_type"] = rdsInstance.ClusterType - - if rdsInstance.StorageSize != nil { - valuesMap["allocated_storage"] = *rdsInstance.StorageSize - } - valuesMap["backup_retention_period"] = rdsInstance.BackupRetentionPeriod - if rdsInstance.StorageType != nil { - valuesMap["storage_type"] = *rdsInstance.StorageType - } - if rdsInstance.StorageIops != nil { - valuesMap["iops"] = *rdsInstance.StorageIops - } - valuesMap["performance_insights_enabled"] = rdsInstance.PerformanceInsightsEnabled - valuesMap["performance_insights_retention_period"] = rdsInstance.PerformanceInsightsRetentionPeriod - valuesMap["io_optimized"] = false // TODO: Check aws api rds response // Maybe needs some changes in pennywise logic - - valuesMap["pennywise_usage"] = map[string]any{ - //"monthly_io_requests": "", - //"monthly_data_api_calls": "", - //"additional_backup_storage_gb": "", - //"monthly_additional_performance_insights_requests": "", - } - - req.Resources = append(req.Resources, schema.ResourceDef{ - Address: rdsInstance.HashedInstanceId, - Type: og_client.ResourceTypeConversion("aws::rds::dbinstance"), - Name: "", - RegionCode: region, - ProviderName: schema.AWSProvider, - Values: valuesMap, - }) - - reqBody, err := json.Marshal(req) - if err != nil { - return 0, nil, err - } - - var response cost.State - statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response) - if err != nil { - return 0, nil, err - } - - if statusCode != http.StatusOK { - return 0, nil, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode) - } - - componentCost := make(map[string]float64) - for _, component := range response.GetCostComponents() { - if component.Cost().Decimal.InexactFloat64() == 0 { - continue - } - componentCost[component.Name] = component.Cost().Decimal.InexactFloat64() - } - - resourceCost, err := response.Cost() - if err != nil { - return 0, nil, err - } - - return resourceCost.Decimal.InexactFloat64(), componentCost, nil -} - -func (s *Service) GetRDSStorageCost(ctx context.Context, region string, rdsInstance entity.AwsRds, metrics map[string][]types2.Datapoint) (float64, map[string]float64, error) { - req := schema.Submission{ - ID: "submission-1", - CreatedAt: time.Now(), - Resources: []schema.ResourceDef{}, - } - - valuesMap := map[string]any{} - valuesMap["region"] = region - valuesMap["instance_class"] = rdsInstance.InstanceType - valuesMap["availability_zone"] = rdsInstance.AvailabilityZone - valuesMap["engine"] = rdsInstance.Engine - valuesMap["engine_version"] = rdsInstance.EngineVersion - valuesMap["license_model"] = rdsInstance.LicenseModel - if rdsInstance.ClusterType == entity.AwsRdsClusterTypeSingleInstance { - valuesMap["multi_az"] = false - } else { - valuesMap["multi_az"] = true - } - valuesMap["cluster_type"] = rdsInstance.ClusterType - - if rdsInstance.StorageSize != nil { - valuesMap["allocated_storage"] = *rdsInstance.StorageSize - } - valuesMap["backup_retention_period"] = rdsInstance.BackupRetentionPeriod - if rdsInstance.StorageType != nil { - valuesMap["storage_type"] = 
-	}
-	if rdsInstance.StorageIops != nil {
-		valuesMap["iops"] = *rdsInstance.StorageIops
-	}
-	if rdsInstance.StorageThroughput != nil {
-		valuesMap["throughput"] = *rdsInstance.StorageThroughput
-	}
-	valuesMap["performance_insights_enabled"] = rdsInstance.PerformanceInsightsEnabled
-	valuesMap["performance_insights_retention_period"] = rdsInstance.PerformanceInsightsRetentionPeriod
-	valuesMap["io_optimized"] = false // TODO: Check aws api rds response // Maybe needs some changes in pennywise logic
-
-	valuesMap["pennywise_usage"] = map[string]any{
-		//"monthly_io_requests": "",
-		//"additional_backup_storage_gb": "",
-	}
-
-	req.Resources = append(req.Resources, schema.ResourceDef{
-		Address: rdsInstance.HashedInstanceId,
-		Type: "aws_db_storage",
-		Name: "",
-		RegionCode: region,
-		ProviderName: schema.AWSProvider,
-		Values: valuesMap,
-	})
-
-	reqBody, err := json.Marshal(req)
-	if err != nil {
-		return 0, nil, err
-	}
-
-	var response cost.State
-	statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response)
-	if err != nil {
-		return 0, nil, err
-	}
-
-	if statusCode != http.StatusOK {
-		return 0, nil, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode)
-	}
-
-	componentCost := make(map[string]float64)
-	for _, component := range response.GetCostComponents() {
-		if component.Cost().Decimal.InexactFloat64() == 0 {
-			continue
-		}
-		componentCost[component.Name] = component.Cost().Decimal.InexactFloat64()
-	}
-
-	resourceCost, err := response.Cost()
-	if err != nil {
-		return 0, nil, err
-	}
-
-	return resourceCost.Decimal.InexactFloat64(), componentCost, nil
-}
-
-func (s *Service) GetRDSComputeCost(ctx context.Context, region string, rdsInstance entity.AwsRds, metrics map[string][]types2.Datapoint) (float64, map[string]float64, error) {
-	req := schema.Submission{
-		ID: "submission-1",
-		CreatedAt: time.Now(),
-		Resources: []schema.ResourceDef{},
-	}
-
-	valuesMap := map[string]any{}
-	valuesMap["region"] = region
-	valuesMap["instance_class"] = rdsInstance.InstanceType
-	valuesMap["availability_zone"] = rdsInstance.AvailabilityZone
-	valuesMap["engine"] = rdsInstance.Engine
-	valuesMap["engine_version"] = rdsInstance.EngineVersion
-	valuesMap["license_model"] = rdsInstance.LicenseModel
-	if rdsInstance.ClusterType == entity.AwsRdsClusterTypeSingleInstance {
-		valuesMap["multi_az"] = false
-	} else {
-		valuesMap["multi_az"] = true
-	}
-	valuesMap["cluster_type"] = rdsInstance.ClusterType
-
-	if rdsInstance.StorageSize != nil {
-		valuesMap["allocated_storage"] = *rdsInstance.StorageSize
-	}
-	valuesMap["backup_retention_period"] = rdsInstance.BackupRetentionPeriod
-	if rdsInstance.StorageType != nil {
-		valuesMap["storage_type"] = *rdsInstance.StorageType
-	}
-	if rdsInstance.StorageIops != nil {
-		valuesMap["iops"] = *rdsInstance.StorageIops
-	}
-	valuesMap["performance_insights_enabled"] = rdsInstance.PerformanceInsightsEnabled
-	valuesMap["performance_insights_retention_period"] = rdsInstance.PerformanceInsightsRetentionPeriod
-	valuesMap["io_optimized"] = false // TODO: Check aws api rds response // Maybe needs some changes in pennywise logic
-
-	valuesMap["pennywise_usage"] = map[string]any{
-		//"monthly_io_requests": "",
-		//"monthly_data_api_calls": "",
-		//"additional_backup_storage_gb": "",
-		//"monthly_additional_performance_insights_requests": "",
-	}
-
-	req.Resources = append(req.Resources, schema.ResourceDef{
-		Address: rdsInstance.HashedInstanceId,
-		Type: "aws_db_compute_instance",
"aws_db_compute_instance", - Name: "", - RegionCode: region, - ProviderName: schema.AWSProvider, - Values: valuesMap, - }) - - reqBody, err := json.Marshal(req) - if err != nil { - return 0, nil, err - } - - var response cost.State - statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response) - if err != nil { - return 0, nil, err - } - - if statusCode != http.StatusOK { - return 0, nil, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode) - } - - componentCost := make(map[string]float64) - for _, component := range response.GetCostComponents() { - if component.Cost().Decimal.InexactFloat64() == 0 { - continue - } - componentCost[component.Name] = component.Cost().Decimal.InexactFloat64() - } - - resourceCost, err := response.Cost() - if err != nil { - return 0, nil, err - } - - return resourceCost.Decimal.InexactFloat64(), componentCost, nil -} diff --git a/services/wastage/cost/azurerm_compute.go b/services/wastage/cost/azurerm_compute.go deleted file mode 100644 index 4f2d2caf0..000000000 --- a/services/wastage/cost/azurerm_compute.go +++ /dev/null @@ -1,63 +0,0 @@ -package cost - -import ( - "context" - "encoding/json" - "fmt" - "github.com/kaytu-io/pennywise/pkg/cost" - "github.com/kaytu-io/pennywise/pkg/schema" - "github.com/opengovern/og-util/pkg/httpclient" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "net/http" - "time" -) - -func (s *Service) GetAzureComputeVMCost(ctx context.Context, instance entity.AzureVM) (float64, error) { - req := schema.Submission{ - ID: "submission-1", - CreatedAt: time.Now(), - Resources: []schema.ResourceDef{}, - } - - valuesMap := map[string]any{} - valuesMap["size"] = instance.InstanceType - valuesMap["sku"] = instance.InstanceType - valuesMap["location"] = instance.Region - valuesMap["instances"] = 1 - - valuesMap["pennywise_usage"] = map[string]any{} - - req.Resources = append(req.Resources, schema.ResourceDef{ - Address: instance.Id, - Type: "azurerm_linux_virtual_machine_scale_set", - Name: "", - RegionCode: instance.Region, - ProviderName: "azurerm", - Values: valuesMap, - }) - - reqBody, err := json.Marshal(req) - if err != nil { - return 0, err - } - - var response cost.State - statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response) - if err != nil { - return 0, err - } - - if statusCode != http.StatusOK { - return 0, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode) - } - - for _, comp := range response.GetCostComponents() { - fmt.Println(comp.Name, comp.Unit, comp.Rate.InexactFloat64(), comp.Error, comp.MonthlyQuantity.InexactFloat64()) - } - resourceCost, err := response.Cost() - if err != nil { - return 0, err - } - - return resourceCost.Decimal.InexactFloat64(), nil -} diff --git a/services/wastage/cost/ec2_instance.go b/services/wastage/cost/ec2_instance.go deleted file mode 100644 index 1dd944fa7..000000000 --- a/services/wastage/cost/ec2_instance.go +++ /dev/null @@ -1,215 +0,0 @@ -package cost - -import ( - "context" - "encoding/json" - "fmt" - types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/kaytu-io/pennywise/pkg/cost" - "github.com/kaytu-io/pennywise/pkg/schema" - "github.com/opengovern/og-util/pkg/httpclient" - og_client "github.com/opengovern/opengovernance/pkg/steampipe-plugin-opengovernance/opengovernance-client" - 
"github.com/opengovern/opengovernance/services/wastage/api/entity" - "net/http" - "time" -) - -func (s *Service) GetEC2InstanceCost(ctx context.Context, region string, instance entity.EC2Instance, volumes []entity.EC2Volume, metrics map[string][]types2.Datapoint) (float64, map[string]float64, error) { - req := schema.Submission{ - ID: "submission-1", - CreatedAt: time.Now(), - Resources: []schema.ResourceDef{}, - } - - valuesMap := map[string]any{} - valuesMap["instance_type"] = instance.InstanceType - if instance.Placement != nil { - valuesMap["tenancy"] = instance.Placement.Tenancy - valuesMap["availability_zone"] = instance.Placement.AvailabilityZone - valuesMap["host_id"] = instance.Placement.HashedHostId - } - valuesMap["ebs_optimized"] = instance.EbsOptimized - if instance.Monitoring != nil { - if *instance.Monitoring == "disabled" || *instance.Monitoring == "disabling" { - valuesMap["monitoring"] = false - } else { - valuesMap["monitoring"] = true - } - } - //if instance.CpuOptions != nil { - // valuesMap["credit_specification"] = []map[string]any{{ - // "cpu_credits": *instance.CpuOptions, //TODO - not sure - // }} - //} - var blockDevices []map[string]any - for _, v := range volumes { - vParams := map[string]any{ - "device_name": v.HashedVolumeId, - "volume_type": v.VolumeType, - "volume_size": *v.Size, - } - if v.Iops != nil { - vParams["iops"] = *v.Iops - } - blockDevices = append(blockDevices, vParams) - } - valuesMap["ebs_block_device"] = blockDevices - valuesMap["launch_template"] = []map[string]any{} - if instance.InstanceLifecycle == types.InstanceLifecycleTypeSpot { - valuesMap["spot_price"] = "Spot" - } else { - valuesMap["spot_price"] = "" - } - - os := "Linux" - if instance.Platform != "" { - os = instance.Platform - } - valuesMap["pennywise_usage"] = map[string]any{ - "operating_system": os, - "operation": instance.UsageOperation, - //"reserved_instance_type": "", - //"reserved_instance_term": "", - //"reserved_instance_payment_option": "", - //"monthly_cpu_credit_hrs": "", - //"vcpu_count": "", - "monthly_hrs": "730", - } - - req.Resources = append(req.Resources, schema.ResourceDef{ - Address: instance.HashedInstanceId, - Type: og_client.ResourceTypeConversion("aws::ec2::instance"), - Name: "", - RegionCode: region, - ProviderName: schema.AWSProvider, - Values: valuesMap, - }) - - reqBody, err := json.Marshal(req) - if err != nil { - return 0, nil, err - } - - var response cost.State - statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response) - if err != nil { - return 0, nil, err - } - - if statusCode != http.StatusOK { - return 0, nil, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode) - } - - componentCost := make(map[string]float64) - for _, component := range response.GetCostComponents() { - if component.Cost().Decimal.InexactFloat64() == 0 { - continue - } - componentCost[component.Name] = component.Cost().Decimal.InexactFloat64() - } - - resourceCost, err := response.Cost() - if err != nil { - return 0, nil, err - } - - return resourceCost.Decimal.InexactFloat64(), componentCost, nil -} - -func (s *Service) GetEBSVolumeCost(ctx context.Context, region string, volume entity.EC2Volume, volumeMetrics map[string][]types2.Datapoint) (float64, map[string]float64, error) { - req := schema.Submission{ - ID: "submission-1", - CreatedAt: time.Now(), - Resources: []schema.ResourceDef{}, - } - - valuesMap := map[string]any{} - valuesMap["availability_zone"] = *volume.AvailabilityZone - 
valuesMap["type"] = volume.VolumeType - valuesMap["size"] = *volume.Size - valuesMap["iops"] = *volume.Iops - valuesMap["throughput"] = volume.Throughput - - req.Resources = append(req.Resources, schema.ResourceDef{ - Address: volume.HashedVolumeId, - Type: og_client.ResourceTypeConversion("aws::ec2::volume"), - Name: "", - RegionCode: region, - ProviderName: schema.AWSProvider, - Values: valuesMap, - }) - - reqBody, err := json.Marshal(req) - if err != nil { - return 0, nil, err - } - - var response cost.State - statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response) - if err != nil { - return 0, nil, err - } - - if statusCode != http.StatusOK { - return 0, nil, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode) - } - - componentCost := make(map[string]float64) - for _, component := range response.GetCostComponents() { - if component.Cost().Decimal.InexactFloat64() == 0 { - continue - } - componentCost[component.Name] = component.Cost().Decimal.InexactFloat64() - } - - resourceCost, err := response.Cost() - if err != nil { - return 0, nil, err - } - - return resourceCost.Decimal.InexactFloat64(), componentCost, nil -} - -func (s *Service) EstimateLicensePrice(ctx context.Context, instance entity.EC2Instance) (float64, error) { - originalAZ := instance.Placement.AvailabilityZone - defer func() { - instance.Placement.AvailabilityZone = originalAZ - }() - instance.Placement.AvailabilityZone = "us-east-1a" - withLicense, _, err := s.GetEC2InstanceCost(ctx, "us-east-1", instance, nil, nil) - if err != nil { - return 0, err - } - instance.UsageOperation = mapLicenseToNoLicense[instance.UsageOperation] - withoutLicense, _, err := s.GetEC2InstanceCost(ctx, "us-east-1", instance, nil, nil) - if err != nil { - return 0, err - } - return withLicense - withoutLicense, nil -} - -var mapLicenseToNoLicense = map[string]string{ - // Red Hat - "RunInstances:00g0": "RunInstances:00g0", - "RunInstances:0010": "RunInstances:00g0", - "RunInstances:1010": "RunInstances:00g0", - "RunInstances:1014": "RunInstances:00g0", - "RunInstances:1110": "RunInstances:00g0", - "RunInstances:0014": "RunInstances:00g0", - "RunInstances:0210": "RunInstances:00g0", - "RunInstances:0110": "RunInstances:00g0", - // Windows - "RunInstances:0002": "RunInstances:0800", - "RunInstances:0800": "RunInstances:0800", - "RunInstances:0102": "RunInstances:0800", - "RunInstances:0006": "RunInstances:0800", - "RunInstances:0202": "RunInstances:0800", - // Linux/UNIX - "RunInstances": "RunInstances", - "RunInstances:0004": "RunInstances", - "RunInstances:0200": "RunInstances", - "RunInstances:000g": "RunInstances", - "RunInstances:0g00": "RunInstances", - "RunInstances:0100": "RunInstances", -} diff --git a/services/wastage/cost/gcp_compute.go b/services/wastage/cost/gcp_compute.go deleted file mode 100644 index c52eccea4..000000000 --- a/services/wastage/cost/gcp_compute.go +++ /dev/null @@ -1,128 +0,0 @@ -package cost - -import ( - "context" - "encoding/json" - "fmt" - "github.com/kaytu-io/pennywise/pkg/cost" - "github.com/kaytu-io/pennywise/pkg/schema" - "github.com/opengovern/og-util/pkg/httpclient" - gcp "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - "net/http" - "time" -) - -func (s *Service) GetGCPComputeInstanceCost(ctx context.Context, instance gcp.GcpComputeInstance) (float64, float64, error) { - req := schema.Submission{ - ID: "submission-1", - CreatedAt: time.Now(), - Resources: []schema.ResourceDef{}, - } - - valuesMap := 
-	valuesMap["machine_type"] = instance.MachineType
-	valuesMap["zone"] = instance.Zone
-
-	purcharseOption := "on_demand"
-	if instance.Preemptible {
-		purcharseOption = "preemptible"
-	}
-	valuesMap["purchase_option"] = purcharseOption
-	valuesMap["license"] = instance.InstanceOsLicense
-
-	valuesMap["pennywise_usage"] = map[string]any{}
-
-	req.Resources = append(req.Resources, schema.ResourceDef{
-		Address: instance.Id,
-		Type: "google_compute_instance",
-		Name: "",
-		RegionCode: instance.Zone,
-		ProviderName: "google",
-		Values: valuesMap,
-	})
-
-	reqBody, err := json.Marshal(req)
-	if err != nil {
-		return 0, 0, err
-	}
-
-	var response cost.State
-	statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response)
-	if err != nil {
-		return 0, 0, err
-	}
-
-	if statusCode != http.StatusOK {
-		return 0, 0, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode)
-	}
-
-	resourceCost, err := response.Cost()
-	if err != nil {
-		return 0, 0, err
-	}
-
-	var licenseCost float64
-
-	for _, resource := range response.Resources {
-		for _, comps := range resource.Components {
-			for _, comp := range comps {
-				if comp.Name == "License Price" {
-					licenseCost = comp.MonthlyQuantity.InexactFloat64() * comp.Rate.InexactFloat64()
-				}
-			}
-		}
-	}
-
-	return resourceCost.Decimal.InexactFloat64(), licenseCost, nil
-}
-
-func (s *Service) GetGCPComputeDiskCost(ctx context.Context, disk gcp.GcpComputeDisk) (float64, error) {
-	req := schema.Submission{
-		ID: "submission-1",
-		CreatedAt: time.Now(),
-		Resources: []schema.ResourceDef{},
-	}
-
-	valuesMap := map[string]any{}
-	valuesMap["disk_type"] = disk.DiskType
-	valuesMap["region"] = disk.Region
-	if disk.DiskSize != nil {
-		valuesMap["size"] = disk.DiskSize.Value
-	}
-	if disk.ProvisionedIops != nil {
-		valuesMap["iops"] = disk.ProvisionedIops.Value
-	}
-
-	valuesMap["pennywise_usage"] = map[string]any{}
-
-	req.Resources = append(req.Resources, schema.ResourceDef{
-		Address: disk.Id,
-		Type: "google_compute_disk",
-		Name: "",
-		RegionCode: disk.Region,
-		ProviderName: "google",
-		Values: valuesMap,
-	})
-
-	reqBody, err := json.Marshal(req)
-	if err != nil {
-		return 0, err
-	}
-
-	var response cost.State
-	statusCode, err := httpclient.DoRequest(ctx, "GET", s.pennywiseBaseUrl+"/api/v1/cost/submission", nil, reqBody, &response)
-	if err != nil {
-		return 0, err
-	}
-
-	if statusCode != http.StatusOK {
-		return 0, fmt.Errorf("failed to get pennywise cost, status code = %d", statusCode)
-	}
-
-	resourceCost, err := response.Cost()
-	if err != nil {
-		return 0, err
-	}
-
-	return resourceCost.Decimal.InexactFloat64(), nil
-}
diff --git a/services/wastage/cost/svc.go b/services/wastage/cost/svc.go
deleted file mode 100644
index 3eb0a5743..000000000
--- a/services/wastage/cost/svc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package cost
-
-type Service struct {
-	pennywiseBaseUrl string
-}
-
-func New(pennywiseBaseUrl string) *Service {
-	return &Service{
-		pennywiseBaseUrl: pennywiseBaseUrl,
-	}
-}
diff --git a/services/wastage/db/connector/service.go b/services/wastage/db/connector/service.go
deleted file mode 100644
index 8eff32df8..000000000
--- a/services/wastage/db/connector/service.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package connector
-
-import (
-	"fmt"
-	"github.com/opengovern/og-util/pkg/koanf"
-	"go.uber.org/zap"
-	"gorm.io/driver/postgres"
-	"gorm.io/gorm"
-	"gorm.io/gorm/logger"
-	"moul.io/zapgorm2"
-)
-
-type Database struct {
-	db *gorm.DB
-}
-
-func New(config koanf.Postgres, logger *zap.Logger, logLevel logger.LogLevel) (*Database, error) {
-	gormLogger := zapgorm2.New(logger).LogMode(logLevel)
-	dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%s sslmode=%s", config.Host, config.Username, config.Password, config.DB, config.Port, config.SSLMode)
-	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
-		Logger: gormLogger,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	sqlDB, err := db.DB()
-	if err != nil {
-		return nil, err
-	}
-	sqlDB.SetMaxOpenConns(20)
-	sqlDB.SetMaxIdleConns(10)
-
-	return &Database{
-		db: db,
-	}, nil
-}
-
-func (s *Database) Conn() *gorm.DB {
-	return s.db
-}
diff --git a/services/wastage/db/model/credential.go b/services/wastage/db/model/credential.go
deleted file mode 100644
index 03a86c2fd..000000000
--- a/services/wastage/db/model/credential.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package model
-
-import (
-	"gorm.io/gorm"
-)
-
-type Credential struct {
-	gorm.Model
-
-	Auth0UserId string
-	AWSJumpRole string
-}
diff --git a/services/wastage/db/model/data_age.go b/services/wastage/db/model/data_age.go
deleted file mode 100644
index 13fb47884..000000000
--- a/services/wastage/db/model/data_age.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package model
-
-import "time"
-
-type DataAge struct {
-	DataType string `gorm:"primaryKey"`
-	UpdatedAt time.Time
-}
diff --git a/services/wastage/db/model/ebs_volume_types.go b/services/wastage/db/model/ebs_volume_types.go
deleted file mode 100644
index 2e3dafbc6..000000000
--- a/services/wastage/db/model/ebs_volume_types.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package model
-
-import (
-	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
-	"gorm.io/gorm"
-	"strconv"
-	"strings"
-)
-
-type EBSVolumeChargeType string
-
-const (
-	ChargeTypeIOPS EBSVolumeChargeType = "IOPS"
-	ChargeTypeSize EBSVolumeChargeType = "Size"
-	ChargeTypeThroughput EBSVolumeChargeType = "Throughput"
-)
-
-const (
-	Io2ProvisionedIopsTier1UpperBound = 32000
-	Io2ProvisionedIopsTier2UpperBound = 64000
-	Gp3BaseThroughput = 125
-	Gp3BaseIops = 3000
-	Gp2IopsPerGiB = 3
-)
-
-type EBSVolumeType struct {
-	gorm.Model
-
-	VolumeType types.VolumeType `gorm:index`
-	ChargeType EBSVolumeChargeType `gorm:index`
-	PricePerUnit float64 `gorm:index:price_idx,sort:asc`
-	PriceGroup string
-
-	MaxIops int32 `gorm:index`
-	MaxThroughput int32 `gorm:index`
-	MaxSize int `gorm:index`
-
-	TermType string
-	RegionCode string `gorm:index`
-}
-
-func (v *EBSVolumeType) PopulateFromMap(columns map[string]int, row []string) {
-	for col, index := range columns {
-		switch col {
-		case "Volume API Name":
-			switch row[index] {
-			case "gp2":
-				v.VolumeType = types.VolumeTypeGp2
-			case "gp3":
-				v.VolumeType = types.VolumeTypeGp3
-			case "io1":
-				v.VolumeType = types.VolumeTypeIo1
-			case "io2":
-				v.VolumeType = types.VolumeTypeIo2
-			case "sc1":
-				v.VolumeType = types.VolumeTypeSc1
-			case "st1":
-				v.VolumeType = types.VolumeTypeSt1
-			case "standard":
-				v.VolumeType = types.VolumeTypeStandard
-			}
-		case "PricePerUnit":
-			v.PricePerUnit, _ = strconv.ParseFloat(row[index], 64)
-		case "Region Code":
-			v.RegionCode = row[index]
-		case "TermType":
-			v.TermType = row[index]
-		case "Group":
-			v.PriceGroup = row[index]
-		case "Product Family":
-			switch row[index] {
-			case "Storage":
-				v.ChargeType = ChargeTypeSize
-			case "Provisioned Throughput":
-				v.ChargeType = ChargeTypeThroughput
-			case "System Operation":
-				v.ChargeType = ChargeTypeIOPS
-			}
-		case "Max throughput/volume":
-			sections := strings.Split(row[index], " ")
-
-			for _, numberSection := range sections {
-				mt, err := strconv.ParseInt(numberSection, 10, 32)
-				if err == nil {
-					v.MaxThroughput = max(int32(mt), v.MaxThroughput)
-				}
-			}
-		case "Max IOPS/volume":
-			sections := strings.Split(row[index], " ")
-			for _, numberSection := range sections {
-				mi, err := strconv.ParseInt(numberSection, 10, 32)
-				if err == nil {
-					v.MaxIops = max(int32(mi), v.MaxIops)
-				}
-			}
-		case "Max Volume Size":
-			sections := strings.Split(row[index], " ")
-			for _, numberSection := range sections {
-				mv, err := strconv.ParseInt(numberSection, 10, 32)
-				if err == nil {
-					v.MaxSize = max(int(mv), v.MaxSize)
-				}
-			}
-			// TiB to GiB
-			v.MaxSize *= 1024
-		}
-	}
-}
diff --git a/services/wastage/db/model/ec2_instance_types.go b/services/wastage/db/model/ec2_instance_types.go
deleted file mode 100644
index 8c6f73e80..000000000
--- a/services/wastage/db/model/ec2_instance_types.go
+++ /dev/null
@@ -1,377 +0,0 @@
-package model
-
-import (
-	"gorm.io/gorm"
-	"strconv"
-	"strings"
-)
-
-type EC2InstanceType struct {
-	gorm.Model
-
-	// Basic fields
-	InstanceType string `gorm:"index"`
-	VCpu float64 `gorm:"index:compute_idx"`
-	MemoryGB float64 `gorm:"index:compute_idx"`
-	NetworkMaxBandwidth float64 `gorm:"index:network_idx"`
-	NetworkIsDedicated bool `gorm:"index:network_idx"`
-	PricePerUnit float64 `gorm:"index:price_idx,sort:asc"`
-
-	// Computed fields
-	OperatingSystemFamily string `gorm:"index"`
-	PhysicalProcessorArch string `gorm:"index"`
-	InstanceFamily string `gorm:"index"`
-
-	// Important non-computed fields
-	ProcessorArchitecture string `gorm:"index"`
-	Tenancy string `gorm:"index"`
-	LicenseModel string `gorm:"index"`
-	RegionCode string `gorm:"index"`
-	Operation string `gorm:"index"`
-
-	// Extra fields extracted from describe instance types
-	EbsBaselineBandwidth *int32 `gorm:"index"`
-	EbsMaximumBandwidth *int32 `gorm:"index"`
-	EbsBaselineIops *int32 `gorm:"index"`
-	EbsMaximumIops *int32 `gorm:"index"`
-	EbsBaselineThroughput *float64 `gorm:"index"`
-	EbsMaximumThroughput *float64 `gorm:"index"`
-
-	PricePerUnitStr string
-	NetworkPerformance string
-	Memory string
-	VCPUStr string
-	TermType string
-	PriceDescription string
-	EffectiveDate string
-	StartingRange string
-	EndingRange string
-	Unit string
-	Currency string
-	RelatedTo string
-	LeaseContractLength string
-	PurchaseOption string
-	OfferingClass string
-	ProductFamily string
-	ServiceCode string
-	Location string
-	LocationType string
-	CurrentGeneration string
-	PhysicalProcessor string
-	ClockSpeed string
-	Storage string
-	StorageMedia string
-	VolumeType string
-	MaxVolumeSize string
-	MaxIOPSVolume string
-	MaxIOPSBurstPerformance string
-	MaxThroughputVolume string
-	Provisioned string
-	EBSOptimized string
-	OperatingSystem string
-	Group string
-	GroupDescription string
-	TransferType string
-	FromLocation string
-	FromLocationType string
-	ToLocation string
-	ToLocationType string
-	UsageType string
-	AvailabilityZone string
-	CapacityStatus string
-	ClassicNetworkingSupport string
-	DedicatedEBSThroughput string
-	ECU string
-	ElasticGraphicsType string
-	EnhancedNetworkingSupported string
-	FromRegionCode string
-	GPU string
-	GPUMemory string
-	Instance string
-	InstanceCapacity10xlarge string
-	InstanceCapacity12xlarge string
-	InstanceCapacity16xlarge string
-	InstanceCapacity18xlarge string
-	InstanceCapacity24xlarge string
-	InstanceCapacity2xlarge string
-	InstanceCapacity32xlarge string
-	InstanceCapacity4xlarge string
-	InstanceCapacity8xlarge string
-	InstanceCapacity9xlarge string
-	InstanceCapacityLarge string
-	InstanceCapacityMedium string
-	InstanceCapacityMetal string
-	InstanceCapacityxlarge string
-	InstanceSKU string
-	IntelAVX2Available string
-	IntelAVXAvailable string
-	IntelTurboAvailable string
-	MarketOption string
-	NormalizationSizeFactor string
-	PhysicalCores string
-	PreInstalledSW string
-	ProcessorFeatures string
-	ProductType string
-	ResourceType string
-	ServiceName string
-	SnapshotArchiveFeeType string
-	ToRegionCode string
-	VolumeAPIName string
-	VPCNetworkingSupport string
-}
-
-func (v *EC2InstanceType) PopulateFromMap(columns map[string]int, row []string) {
-	for col, index := range columns {
-		switch col {
-		case "TermType":
-			v.TermType = row[index]
-		case "PriceDescription":
-			v.PriceDescription = row[index]
-		case "EffectiveDate":
-			v.EffectiveDate = row[index]
-		case "StartingRange":
-			v.StartingRange = row[index]
-		case "EndingRange":
-			v.EndingRange = row[index]
-		case "Unit":
-			v.Unit = row[index]
-		case "PricePerUnit":
-			v.PricePerUnit, _ = strconv.ParseFloat(row[index], 64)
-			v.PricePerUnitStr = row[index]
-		case "Currency":
-			v.Currency = row[index]
-		case "RelatedTo":
-			v.RelatedTo = row[index]
-		case "LeaseContractLength":
-			v.LeaseContractLength = row[index]
-		case "PurchaseOption":
-			v.PurchaseOption = row[index]
-		case "OfferingClass":
-			v.OfferingClass = row[index]
-		case "Product Family":
-			v.ProductFamily = row[index]
-		case "serviceCode":
-			v.ServiceCode = row[index]
-		case "Location":
-			v.Location = row[index]
-		case "Location Type":
-			v.LocationType = row[index]
-		case "Instance Type":
-			v.InstanceType = row[index]
-		case "Current Generation":
-			v.CurrentGeneration = row[index]
-		case "Instance Family":
-			v.InstanceFamily = row[index]
-		case "vCPU":
-			v.VCpu, _ = strconv.ParseFloat(row[index], 64)
-			v.VCPUStr = row[index]
-		case "Physical Processor":
-			v.PhysicalProcessor = row[index]
-			ph := strings.ToLower(row[index])
-			switch {
-			case strings.Contains(ph, "graviton"):
-				v.PhysicalProcessorArch = "arm64"
-			case strings.Contains(ph, "apple"):
-				v.PhysicalProcessorArch = "arm64_mac"
-			default:
-				v.PhysicalProcessorArch = "x86_64"
-			}
-		case "Clock Speed":
-			v.ClockSpeed = row[index]
-		case "Memory":
-			v.MemoryGB = parseMemory(row[index])
-			v.Memory = row[index]
-		case "Storage":
-			v.Storage = row[index]
-		case "Network Performance":
-			v.NetworkPerformance = row[index]
-			bandwidth, upTo := parseNetworkPerformance(row[index])
-			v.NetworkMaxBandwidth = bandwidth
-			v.NetworkIsDedicated = !upTo
-		case "Processor Architecture":
-			v.ProcessorArchitecture = row[index]
-		case "Storage Media":
-			v.StorageMedia = row[index]
-		case "Volume Type":
-			v.VolumeType = row[index]
-		case "Max Volume Size":
-			v.MaxVolumeSize = row[index]
-		case "Max IOPS/volume":
-			v.MaxIOPSVolume = row[index]
-		case "Max IOPS Burst Performance":
-			v.MaxIOPSBurstPerformance = row[index]
-		case "Max throughput/volume":
-			v.MaxThroughputVolume = row[index]
-		case "Provisioned":
-			v.Provisioned = row[index]
-		case "Tenancy":
-			v.Tenancy = row[index]
-		case "EBS Optimized":
-			v.EBSOptimized = row[index]
-		case "Operating System":
-			v.OperatingSystem = row[index]
-			switch v.OperatingSystem {
-			case "RHEL":
-				v.OperatingSystemFamily = "Linux/UNIX"
-			case "Red Hat Enterprise Linux with HA":
-				v.OperatingSystemFamily = "Linux/UNIX"
-			case "Ubuntu Pro":
-				v.OperatingSystemFamily = "Linux/UNIX"
-			case "SUSE":
-				v.OperatingSystemFamily = "Linux/UNIX"
-			case "Linux":
-				v.OperatingSystemFamily = "Linux/UNIX"
-			case "Windows":
-				v.OperatingSystemFamily = "Windows"
-			default:
-				v.OperatingSystemFamily = ""
-			}
-		case "License Model":
-			v.LicenseModel = row[index]
-		case "Group":
-			v.Group = row[index]
-		case "Group Description":
-			v.GroupDescription = row[index]
-		case "Transfer Type":
-			v.TransferType = row[index]
-		case "From Location":
-			v.FromLocation = row[index]
-		case "From Location Type":
-			v.FromLocationType = row[index]
-		case "To Location":
-			v.ToLocation = row[index]
-		case "To Location Type":
-			v.ToLocationType = row[index]
-		case "usageType":
-			v.UsageType = row[index]
-		case "operation":
-			v.Operation = row[index]
-		case "AvailabilityZone":
-			v.AvailabilityZone = row[index]
-		case "CapacityStatus":
-			v.CapacityStatus = row[index]
-		case "ClassicNetworkingSupport":
-			v.ClassicNetworkingSupport = row[index]
-		case "Dedicated EBS Throughput":
-			v.DedicatedEBSThroughput = row[index]
-		case "ECU":
-			v.ECU = row[index]
-		case "Elastic Graphics Type":
-			v.ElasticGraphicsType = row[index]
-		case "Enhanced Networking Supported":
-			v.EnhancedNetworkingSupported = row[index]
-		case "From Region Code":
-			v.FromRegionCode = row[index]
-		case "GPU":
-			v.GPU = row[index]
-		case "GPU Memory":
-			v.GPUMemory = row[index]
-		case "Instance":
-			v.Instance = row[index]
-		case "Instance Capacity - 10xlarge":
-			v.InstanceCapacity10xlarge = row[index]
-		case "Instance Capacity - 12xlarge":
-			v.InstanceCapacity12xlarge = row[index]
-		case "Instance Capacity - 16xlarge":
-			v.InstanceCapacity16xlarge = row[index]
-		case "Instance Capacity - 18xlarge":
-			v.InstanceCapacity18xlarge = row[index]
-		case "Instance Capacity - 24xlarge":
-			v.InstanceCapacity24xlarge = row[index]
-		case "Instance Capacity - 2xlarge":
-			v.InstanceCapacity2xlarge = row[index]
-		case "Instance Capacity - 32xlarge":
-			v.InstanceCapacity32xlarge = row[index]
-		case "Instance Capacity - 4xlarge":
-			v.InstanceCapacity4xlarge = row[index]
-		case "Instance Capacity - 8xlarge":
-			v.InstanceCapacity8xlarge = row[index]
-		case "Instance Capacity - 9xlarge":
-			v.InstanceCapacity9xlarge = row[index]
-		case "Instance Capacity - large":
-			v.InstanceCapacityLarge = row[index]
-		case "Instance Capacity - medium":
-			v.InstanceCapacityMedium = row[index]
-		case "Instance Capacity - metal":
-			v.InstanceCapacityMetal = row[index]
-		case "Instance Capacity - xlarge":
-			v.InstanceCapacityxlarge = row[index]
-		case "instanceSKU":
-			v.InstanceSKU = row[index]
-		case "Intel AVX2 Available":
-			v.IntelAVX2Available = row[index]
-		case "Intel AVX Available":
-			v.IntelAVXAvailable = row[index]
-		case "Intel Turbo Available":
-			v.IntelTurboAvailable = row[index]
-		case "MarketOption":
-			v.MarketOption = row[index]
-		case "Normalization Size Factor":
-			v.NormalizationSizeFactor = row[index]
-		case "Physical Cores":
-			v.PhysicalCores = row[index]
-		case "Pre Installed S/W":
-			v.PreInstalledSW = row[index]
-		case "Processor Features":
-			v.ProcessorFeatures = row[index]
-		case "Product Type":
-			v.ProductType = row[index]
-		case "Region Code":
-			v.RegionCode = row[index]
-		case "Resource Type":
-			v.ResourceType = row[index]
-		case "serviceName":
-			v.ServiceName = row[index]
-		case "SnapshotArchiveFeeType":
-			v.SnapshotArchiveFeeType = row[index]
-		case "To Region Code":
-			v.ToRegionCode = row[index]
-		case "Volume API Name":
-			v.VolumeAPIName = row[index]
-		case "VPCNetworkingSupport":
-			v.VPCNetworkingSupport = row[index]
-		}
-	}
-}
-
-func parseMemory(str string) float64 {
-	str = strings.TrimSpace(strings.ToLower(str))
-	if str == "na" {
-		return -1
-	}
-	str = strings.TrimSuffix(str, " gib")
-	n, _ := strconv.ParseFloat(str, 64)
return n -} - -func parseNetworkPerformance(v string) (float64, bool) { - v = strings.ToLower(v) - switch v { - case "very low": - return 50000000, true - case "low": - return 175000000, true - case "low to moderate": - return 475000000, true - case "moderate": - return 600000000, true - case "high": - return 1500000000, true - } - upTo := strings.HasPrefix(v, "up to ") - v = strings.TrimPrefix(v, "up to ") - - factor := 0.0 - if strings.HasSuffix(v, "gigabit") { - factor = (1024 * 1024 * 1024) / 8 - v = strings.TrimSuffix(v, " gigabit") - } else if strings.HasSuffix(v, "megabit") { - factor = (1024 * 1024) / 8 - v = strings.TrimSuffix(v, " megabit") - } - b, err := strconv.ParseFloat(v, 64) - if err != nil { - return 0, false - } - return b * factor, upTo -} diff --git a/services/wastage/db/model/gcp_compute_machinetype.go b/services/wastage/db/model/gcp_compute_machinetype.go deleted file mode 100644 index d1820df79..000000000 --- a/services/wastage/db/model/gcp_compute_machinetype.go +++ /dev/null @@ -1,40 +0,0 @@ -package model - -import ( - "google.golang.org/api/compute/v1" - "gorm.io/gorm" - "strings" -) - -type GCPComputeMachineType struct { - gorm.Model - - // Basic fields - Name string `gorm:"index"` - MachineType string `gorm:"index"` - MachineFamily string `gorm:"index"` - Zone string `gorm:"index"` - Preemptible bool `gorm:"index"` - - GuestCpus int64 - MemoryMb int64 - ImageSpaceGb int64 - Description string - Region string - - UnitPrice float64 -} - -func (p *GCPComputeMachineType) PopulateFromObject(machineType *compute.MachineType, region string, preemptible bool) { - p.Name = machineType.Name - p.MachineType = machineType.Name - mf := strings.ToLower(strings.Split(machineType.Name, "-")[0]) - p.MachineFamily = mf - p.GuestCpus = machineType.GuestCpus - p.MemoryMb = machineType.MemoryMb - p.ImageSpaceGb = machineType.ImageSpaceGb - p.Description = machineType.Description - p.Zone = machineType.Zone - p.Region = region - p.Preemptible = preemptible -} diff --git a/services/wastage/db/model/gcp_compute_sku.go b/services/wastage/db/model/gcp_compute_sku.go deleted file mode 100644 index 9ab476153..000000000 --- a/services/wastage/db/model/gcp_compute_sku.go +++ /dev/null @@ -1,118 +0,0 @@ -package model - -import ( - "fmt" - "google.golang.org/api/cloudbilling/v1" - "gorm.io/gorm" - "regexp" - "strings" -) - -const ( - ram = "RAM" - cpu = "CPU" - gpu = "GPU" -) - -type GCPComputeSKU struct { - gorm.Model - - // Basic fields - SKU string `gorm:"index"` - ResourceFamily string `gorm:"index"` - ResourceGroup string `gorm:"index"` - ServiceDisplayName string `gorm:"index"` - UsageType string `gorm:"index"` - Location string `gorm:"index"` - Type string `gorm:"index"` - ProvisioningModel string `gorm:"index"` - - Description string - MachineFamily string - - UnitPrice float64 - CurrencyCode string -} - -func (p *GCPComputeSKU) PopulateFromObject(sku *cloudbilling.Sku, region string) { - p.Location = region - p.SKU = sku.SkuId - if sku.Category != nil { - p.ResourceFamily = sku.Category.ResourceFamily - p.ResourceGroup = sku.Category.ResourceGroup - p.ServiceDisplayName = sku.Category.ServiceDisplayName - p.UsageType = sku.Category.UsageType - } - p.Description = sku.Description - p.MachineFamily, p.ResourceGroup, p.Type, p.ProvisioningModel = GetSkuDetails(sku) - pe := sku.PricingInfo[len(sku.PricingInfo)-1].PricingExpression - for i := range pe.TieredRates { - p.UnitPrice = float64(pe.TieredRates[i].UnitPrice.Units) + - (float64(pe.TieredRates[i].UnitPrice.Nanos) / 
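
// A hedged, illustrative sketch (not part of the patch above, and assuming fmt
// is imported): what parseNetworkPerformance yields for typical AWS "Network
// Performance" strings. Numeric tiers are converted from gigabit/megabit to
// bytes/s; the labelled tiers ("very low" through "high") map to fixed byte/s
// estimates; the second return value reports an "up to" burst ceiling.
func exampleParseNetworkPerformance() {
	bw, upTo := parseNetworkPerformance("25 Gigabit")
	fmt.Println(bw, upTo) // 3.3554432e+09 false -> dedicated 25 Gbps baseline

	bw, upTo = parseNetworkPerformance("Up to 10 Gigabit")
	fmt.Println(bw, upTo) // 1.34217728e+09 true -> burstable, not dedicated

	bw, upTo = parseNetworkPerformance("Moderate")
	fmt.Println(bw, upTo) // 6e+08 true -> fixed estimate for a labelled tier
}
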
float64(1_000_000_000)) - if p.UnitPrice != 0 { - break - } - } - p.CurrencyCode = pe.TieredRates[0].UnitPrice.CurrencyCode -} - -// GetSkuDetails returns 'Machine Family', 'Resource Group', 'Type', 'ProvisioningModel' -func GetSkuDetails(sku *cloudbilling.Sku) (string, string, string, string) { - if sku.Category == nil { - return "", "", "", "" - } - if sku.Category.ResourceGroup == cpu || sku.Category.ResourceGroup == ram || sku.Category.ResourceGroup == gpu { - mf := strings.ToLower(strings.Split(sku.Description, " ")[0]) - provisioningModel := "standard" - if fmt.Sprintf("%s %s", strings.ToLower(strings.Split(sku.Description, " ")[0]), strings.ToLower(strings.Split(sku.Description, " ")[1])) == "spot preemptible" { - mf = strings.ToLower(strings.Split(sku.Description, " ")[2]) - provisioningModel = "preemptible" - } - if mf == "n4" || mf == "e2" || mf == "n2" || mf == "c3" || mf == "c3d" || mf == "n2d" || - mf == "t2d" || mf == "t2a" || mf == "h3" || mf == "c2" || mf == "c2d" || mf == "m3" || mf == "m2" || - mf == "m1" || mf == "z3" || mf == "a3" || mf == "a3plus" || mf == "a2" || mf == "g2" { - reST := regexp.MustCompile(`^.* Sole Tenancy Instance (Core|Ram) running in .*$`) - if reST.MatchString(sku.Description) { - return mf, sku.Category.ResourceGroup, "", provisioningModel - } - reCustomExt := regexp.MustCompile(`^.* Custom Extended Instance (Core|Ram) running in .*$`) - if reCustomExt.MatchString(sku.Description) { - return mf, sku.Category.ResourceGroup, "Custom Extended", provisioningModel - } - reCustomExt = regexp.MustCompile(`^.* Custom Extended (Core|Ram) running in .*$`) - if reCustomExt.MatchString(sku.Description) { - return mf, sku.Category.ResourceGroup, "Custom Extended", provisioningModel - } - reCustom := regexp.MustCompile(`^.* Custom Instance (Core|Ram) running in .*$`) - if reCustom.MatchString(sku.Description) { - return mf, sku.Category.ResourceGroup, "Custom", provisioningModel - } - re := regexp.MustCompile(`^.* Instance (Core|Ram) running in .*$`) - if re.MatchString(sku.Description) { - return mf, sku.Category.ResourceGroup, "Predefined", provisioningModel - } - return mf, sku.Category.ResourceGroup, "", provisioningModel - } - } - - if sku.Category.ResourceGroup == "N1Standard" { - reCore := regexp.MustCompile(`^N1 Predefined Instance Core running in .*$`) - if reCore.MatchString(sku.Description) { - return "n1", cpu, "Predefined", "standard" - } - reRam := regexp.MustCompile(`^N1 Predefined Instance Ram running in .*$`) - if reRam.MatchString(sku.Description) { - return "n1", ram, "Predefined", "standard" - } - reSpotCore := regexp.MustCompile(`^Spot Preemptible N1 Predefined Instance Core running in .*$`) - if reSpotCore.MatchString(sku.Description) { - return "n1", cpu, "Predefined", "preemptible" - } - reSpotRam := regexp.MustCompile(`^Spot Preemptible N1 Predefined Instance Ram running in .*$`) - if reSpotRam.MatchString(sku.Description) { - return "n1", ram, "Predefined", "preemptible" - } - } - - return "", sku.Category.ResourceGroup, "", "" -} diff --git a/services/wastage/db/model/gcp_compute_storagetype.go b/services/wastage/db/model/gcp_compute_storagetype.go deleted file mode 100644 index fb31af205..000000000 --- a/services/wastage/db/model/gcp_compute_storagetype.go +++ /dev/null @@ -1,43 +0,0 @@ -package model - -import ( - "google.golang.org/api/compute/v1" - "gorm.io/gorm" - "strconv" - "strings" -) - -type GCPComputeDiskType struct { - gorm.Model - - // Basic fields - Name string `gorm:"index"` - StorageType string `gorm:"index"` - Zone 
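
// A hedged, illustrative sketch (not part of the patch above) of how
// GetSkuDetails classifies cloudbilling SKU descriptions. The descriptions are
// invented, but follow the shapes the regexes above expect.
func exampleGetSkuDetails() {
	sku := &cloudbilling.Sku{
		Description: "Spot Preemptible N2 Instance Core running in Ohio",
		Category:    &cloudbilling.Category{ResourceGroup: "CPU"},
	}
	fmt.Println(GetSkuDetails(sku)) // n2 CPU Predefined preemptible

	sku.Description = "N2 Custom Instance Ram running in Virginia"
	sku.Category.ResourceGroup = "RAM"
	fmt.Println(GetSkuDetails(sku)) // n2 RAM Custom standard
}
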
string `gorm:"index"` - - MinSizeGb int64 - MaxSizeGb int64 - Region string - - UnitPrice float64 -} - -func (p *GCPComputeDiskType) PopulateFromObject(diskType *compute.DiskType) { - p.Name = diskType.Name - p.StorageType = diskType.Name - - diskZoneURLParts := strings.Split(diskType.Zone, "/") - diskZone := diskZoneURLParts[len(diskZoneURLParts)-1] - region := strings.Join([]string{strings.Split(diskZone, "-")[0], strings.Split(diskZone, "-")[1]}, "-") - p.Region = region - p.Zone = diskZone - - vds := strings.Split(diskType.ValidDiskSize, "-") - minSizeGbStr, _ := strings.CutSuffix(vds[0], "GB") - minSizeGb, _ := strconv.ParseInt(minSizeGbStr, 10, 64) - p.MinSizeGb = minSizeGb - - maxSizeGbStr, _ := strings.CutSuffix(vds[1], "GB") - maxSizeGb, _ := strconv.ParseInt(maxSizeGbStr, 10, 64) - p.MaxSizeGb = maxSizeGb -} diff --git a/services/wastage/db/model/organization.go b/services/wastage/db/model/organization.go deleted file mode 100644 index 81357d421..000000000 --- a/services/wastage/db/model/organization.go +++ /dev/null @@ -1,15 +0,0 @@ -package model - -import ( - "database/sql" - "time" -) - -type Organization struct { - OrganizationId string `gorm:"primaryKey"` - PremiumUntil *time.Time - - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt sql.NullTime `gorm:"index"` -} diff --git a/services/wastage/db/model/preprocess_job.go b/services/wastage/db/model/preprocess_job.go deleted file mode 100644 index 90d7ec1ec..000000000 --- a/services/wastage/db/model/preprocess_job.go +++ /dev/null @@ -1,21 +0,0 @@ -package model - -import ( - "gorm.io/gorm" -) - -type Status string - -const ( - Status_CREATED Status = "CREATED" - Status_IN_PROGRESS Status = "IN_PROGRESS" - Status_SUCCEEDED Status = "SUCCEEDED" - Status_FAILED Status = "FAILED" -) - -type PreProcessJob struct { - gorm.Model - - Auth0UserId string - Status Status -} diff --git a/services/wastage/db/model/rds_db_instance.go b/services/wastage/db/model/rds_db_instance.go deleted file mode 100644 index decbe60cd..000000000 --- a/services/wastage/db/model/rds_db_instance.go +++ /dev/null @@ -1,187 +0,0 @@ -package model - -import ( - "gorm.io/gorm" - "strconv" - "strings" -) - -type RDSDBInstance struct { - gorm.Model - - // Basic fields - - VCpu float64 `gorm:"index"` - MemoryGb float64 `gorm:"index"` - NetworkThroughput *float64 `gorm:"index"` // In bytes/s - DedicatedEBSThroughputBytes *float64 `gorm:"index"` // In bytes/s - DedicatedEBSThroughput string `gorm:"index"` - DatabaseEngine string `gorm:"index;type:citext"` - DatabaseEdition string `gorm:"index;type:citext"` - DeploymentOption string `gorm:"index"` - ProductFamily string `gorm:"index"` - InstanceType string `gorm:"index;type:citext"` - - PricePerUnit float64 `gorm:"index:price_idx,sort:asc"` - - SKU string - OfferTermCode string - RateCode string - TermType string - PriceDescription string - EffectiveDate string - StartingRange string - EndingRange string - Unit string - PricePerUnitStr string - Currency string - serviceCode string - Location string - LocationType string - CurrentGeneration string - InstanceFamily string - PhysicalProcessor string - ClockSpeed string - Memory string - Storage string - NetworkPerformance string - ProcessorArchitecture string - EngineCode string - LicenseModel string - UsageType string - Operation string - DeploymentModel string - EngineMediaType string - EnhancedNetworkingSupported string - InstanceTypeFamily string - NormalizationSizeFactor string - PricingUnit string - ProcessorFeatures string - RegionCode string - ServiceName 
string -} - -func (p *RDSDBInstance) PopulateFromMap(columns map[string]int, row []string) { - for col, index := range columns { - switch col { - case "SKU": - p.SKU = row[index] - case "OfferTermCode": - p.OfferTermCode = row[index] - case "RateCode": - p.RateCode = row[index] - case "TermType": - p.TermType = row[index] - case "PriceDescription": - p.PriceDescription = row[index] - case "EffectiveDate": - p.EffectiveDate = row[index] - case "StartingRange": - p.StartingRange = row[index] - case "EndingRange": - p.EndingRange = row[index] - case "Unit": - p.Unit = row[index] - case "PricePerUnit": - p.PricePerUnit, _ = strconv.ParseFloat(row[index], 64) - p.PricePerUnitStr = row[index] - case "Currency": - p.Currency = row[index] - case "Product Family": - p.ProductFamily = row[index] - case "serviceCode": - p.serviceCode = row[index] - case "Location": - p.Location = row[index] - case "Location Type": - p.LocationType = row[index] - case "Instance Type": - p.InstanceType = row[index] - case "Current Generation": - p.CurrentGeneration = row[index] - case "Instance Family": - p.InstanceFamily = row[index] - case "vCPU": - i, err := strconv.ParseFloat(row[index], 64) - if err == nil { - p.VCpu = i - } - case "Physical Processor": - p.PhysicalProcessor = row[index] - case "Clock Speed": - p.ClockSpeed = row[index] - case "Memory": - p.Memory = row[index] - for _, part := range strings.Split(row[index], " ") { - i, err := strconv.ParseFloat(part, 64) - if err == nil { - p.MemoryGb = max(p.MemoryGb, i) - } - } - case "Storage": - p.Storage = row[index] - case "Network Performance": - p.NetworkPerformance = row[index] - for _, part := range strings.Split(row[index], " ") { - i, err := strconv.ParseFloat(part, 64) - // convert from Gbps to bytes/s - i = i * (1024 * 1024 * 1024) / 8 - if err == nil { - if p.NetworkThroughput == nil { - p.NetworkThroughput = &i - } else { - *p.NetworkThroughput = max(*p.NetworkThroughput, i) - } - } - } - case "Processor Architecture": - p.ProcessorArchitecture = row[index] - case "Engine Code": - p.EngineCode = row[index] - case "Database Engine": - p.DatabaseEngine = row[index] - case "Database Edition": - p.DatabaseEdition = row[index] - case "License Model": - p.LicenseModel = row[index] - case "Deployment Option": - p.DeploymentOption = row[index] - case "usageType": - p.UsageType = row[index] - case "operation": - p.Operation = row[index] - case "Dedicated EBS Throughput": - p.DedicatedEBSThroughput = row[index] - for _, part := range strings.Split(row[index], " ") { - i, err := strconv.ParseFloat(part, 64) - // convert from Mbps to bytes/s - i = i * (1024 * 1024) / 8 - if err == nil { - if p.DedicatedEBSThroughputBytes == nil { - p.DedicatedEBSThroughputBytes = &i - } else { - *p.DedicatedEBSThroughputBytes = max(*p.DedicatedEBSThroughputBytes, i) - } - } - } - case "Deployment Model": - p.DeploymentModel = row[index] - case "Engine Media Type": - p.EngineMediaType = row[index] - case "Enhanced Networking Supported": - p.EnhancedNetworkingSupported = row[index] - case "Instance Type Family": - p.InstanceTypeFamily = row[index] - case "Normalization Size Factor": - p.NormalizationSizeFactor = row[index] - case "Pricing Unit": - p.PricingUnit = row[index] - case "Processor Features": - p.ProcessorFeatures = row[index] - case "Region Code": - p.RegionCode = row[index] - case "serviceName": - p.ServiceName = row[index] - } - } -} diff --git a/services/wastage/db/model/rds_db_storage.go b/services/wastage/db/model/rds_db_storage.go deleted file mode 100644 index 
b58b750c7..000000000 --- a/services/wastage/db/model/rds_db_storage.go +++ /dev/null @@ -1,299 +0,0 @@ -package model - -import ( - "gorm.io/gorm" - "strconv" - "strings" -) - -const ( - RDSDBStorageTier1Gp3BaseThroughput = 125.0 - RDSDBStorageTier1Gp3BaseIops = 3000 - RDSDBStorageTier2Gp3BaseThroughput = 500.0 - RDSDBStorageTier2Gp3BaseIops = 12000 - RDSDBStorageTier1Gp3SizeThreshold = 400 - RDSDBStorageTier1Gp3SizeThresholdForOracleEngine = 200 - RDSDBStorageIopsThresholdForThroughputScalingForMySqlEngine = 32000 - RDSDBStorageThroughputScalingOnIopsFactorForMySqlEngine = 5 -) - -type RDSDBStorage struct { - gorm.Model - - // Basic fields - RegionCode string `gorm:"index;type:citext"` - DatabaseEngine string `gorm:"index;type:citext"` - DatabaseEdition string `gorm:"index;type:citext"` - PricePerUnit float64 `gorm:"index:price_idx,sort:asc"` - MinVolumeSizeGb int32 `gorm:"index"` - MaxVolumeSizeGb int32 `gorm:"index"` - MaxThroughputMB float64 `gorm:"index"` - MaxIops int32 `gorm:"index"` - VolumeType string `gorm:"index"` - DeploymentOption string `gorm:"index"` - Group string `gorm:"index"` - - SKU string - OfferTermCode string - RateCode string - TermType string - PriceDescription string - EffectiveDate string - StartingRange string - EndingRange string - Unit string - PricePerUnitStr string - Currency string - ProductFamily string - serviceCode string - Location string - LocationType string - StorageMedia string - MinVolumeSize string - MaxVolumeSize string - EngineCode string - LicenseModel string - GroupDescription string - UsageType string - Operation string - DeploymentModel string - LimitlessPreview string - ServiceName string - VolumeName string -} - -type RDSDBStorageVolumeType string - -const ( - RDSDBStorageVolumeTypeGP2 RDSDBStorageVolumeType = "General Purpose" - RDSDBStorageVolumeTypeGP3 RDSDBStorageVolumeType = "General Purpose-GP3" - RDSDBStorageVolumeTypeIO1 RDSDBStorageVolumeType = "Provisioned IOPS" - RDSDBStorageVolumeTypeIO2 RDSDBStorageVolumeType = "Provisioned IOPS-IO2" - RDSDBStorageVolumeTypeMagnetic RDSDBStorageVolumeType = "Magnetic" - RDSDBStorageVolumeTypeGeneralPurposeAurora RDSDBStorageVolumeType = "General Purpose-Aurora" - RDSDBStorageVolumeTypeIOOptimizedAurora RDSDBStorageVolumeType = "IO Optimized-Aurora" -) - -var RDSDBStorageVolumeTypeToEBSType = map[string]string{ - string(RDSDBStorageVolumeTypeGP2): "gp2", - string(RDSDBStorageVolumeTypeGP3): "gp3", - string(RDSDBStorageVolumeTypeIO1): "io1", - string(RDSDBStorageVolumeTypeIO2): "io2", - string(RDSDBStorageVolumeTypeMagnetic): "standard", - string(RDSDBStorageVolumeTypeGeneralPurposeAurora): "aurora", - string(RDSDBStorageVolumeTypeIOOptimizedAurora): "aurora-iopt1", -} - -var RDSDBStorageEBSTypeToVolumeType = map[string]RDSDBStorageVolumeType{ - "gp2": RDSDBStorageVolumeTypeGP2, - "gp3": RDSDBStorageVolumeTypeGP3, - "io1": RDSDBStorageVolumeTypeIO1, - "io2": RDSDBStorageVolumeTypeIO2, - "standard": RDSDBStorageVolumeTypeMagnetic, - "aurora": RDSDBStorageVolumeTypeGeneralPurposeAurora, - "aurora-iopt1": RDSDBStorageVolumeTypeIOOptimizedAurora, -} - -func (p *RDSDBStorage) PopulateFromMap(columns map[string]int, row []string) { - for col, index := range columns { - switch col { - case "SKU": - p.SKU = row[index] - case "OfferTermCode": - p.OfferTermCode = row[index] - case "RateCode": - p.RateCode = row[index] - case "TermType": - p.TermType = row[index] - case "PriceDescription": - p.PriceDescription = row[index] - case "EffectiveDate": - p.EffectiveDate = row[index] - case "StartingRange": - 
p.StartingRange = row[index] - case "EndingRange": - p.EndingRange = row[index] - case "Unit": - p.Unit = row[index] - case "PricePerUnit": - p.PricePerUnit, _ = strconv.ParseFloat(row[index], 64) - p.PricePerUnitStr = row[index] - case "Currency": - p.Currency = row[index] - case "Product Family": - p.ProductFamily = row[index] - case "serviceCode": - p.serviceCode = row[index] - case "Location": - p.Location = row[index] - case "Location Type": - p.LocationType = row[index] - case "Storage Media": - p.StorageMedia = row[index] - case "Volume Type": - p.VolumeType = row[index] - case "Min Volume Size": - var val int32 - var unit string - for _, c := range strings.Split(strings.ToLower(row[index]), " ") { - v, err := strconv.ParseInt(c, 10, 32) - if err == nil { - val = max(val, int32(v)) - } - if strings.Contains(c, "gb") || strings.Contains(c, "tb") { - unit = strings.ToLower(c) - } - } - switch { - case strings.Contains(unit, "gb"): - p.MinVolumeSizeGb = val - case strings.Contains(unit, "tb"): - p.MinVolumeSizeGb = val * 1024 - default: - p.MinVolumeSizeGb = val - } - p.MinVolumeSize = row[index] - case "Max Volume Size": - var val int32 - var unit string - for _, c := range strings.Split(strings.ToLower(row[index]), " ") { - v, err := strconv.ParseInt(c, 10, 32) - if err == nil { - val = max(val, int32(v)) - } - if strings.Contains(c, "gb") || strings.Contains(c, "tb") { - unit = strings.ToLower(c) - } - } - switch { - case strings.Contains(unit, "gb"): - p.MaxVolumeSizeGb = val - case strings.Contains(unit, "tb"): - p.MaxVolumeSizeGb = val * 1024 - default: - p.MaxVolumeSizeGb = val - } - p.MaxVolumeSize = row[index] - case "Engine Code": - p.EngineCode = row[index] - case "Database Engine": - p.DatabaseEngine = row[index] - case "Database Edition": - p.DatabaseEdition = row[index] - case "License Model": - p.LicenseModel = row[index] - case "Deployment Option": - p.DeploymentOption = row[index] - case "Group": - p.Group = row[index] - case "Group Description": - p.GroupDescription = row[index] - case "usageType": - p.UsageType = row[index] - case "operation": - p.Operation = row[index] - case "Deployment Model": - p.DeploymentModel = row[index] - case "LimitlessPreview": - p.LimitlessPreview = row[index] - case "Region Code": - p.RegionCode = row[index] - case "serviceName": - p.ServiceName = row[index] - case "Volume Name": - p.VolumeName = row[index] - } - } - - // Computed fields - if p.ProductFamily == "Database Storage" { - engine := strings.ToLower(p.DatabaseEngine) - volType := p.VolumeType - // Using https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html to fill in the iops/throughput values - switch { - case volType == string(RDSDBStorageVolumeTypeGP2) && !strings.Contains(engine, "aurora"): // GP2 non-aurora - switch { - case strings.Contains(engine, "mariadb"), strings.Contains(engine, "mysql"), - strings.Contains(engine, "postgres"), strings.Contains(engine, "any"): - p.MaxThroughputMB = 1000 - p.MaxIops = 64000 - case strings.Contains(engine, "oracle"): - p.MaxThroughputMB = 1000 - p.MaxIops = 64000 - case strings.Contains(engine, "sql server"): - p.MaxThroughputMB = 250 - p.MaxIops = 16000 - } - case volType == string(RDSDBStorageVolumeTypeGP3) && !strings.Contains(engine, "aurora"): // GP3 non-aurora - switch { - case strings.Contains(engine, "db2"), strings.Contains(engine, "mariadb"), - strings.Contains(engine, "mysql"), strings.Contains(engine, "postgres"), strings.Contains(engine, "any"): - p.MaxThroughputMB = 4000 - p.MaxIops = 64000 - case 
strings.Contains(engine, "oracle"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 64000
-			case strings.Contains(engine, "sql server"):
-				p.MaxThroughputMB = 1000
-				p.MaxIops = 16000
-			}
-		case volType == string(RDSDBStorageVolumeTypeIO1) && !strings.Contains(engine, "aurora"): // IO1 non-aurora
-			switch {
-			case strings.Contains(engine, "db2"), strings.Contains(engine, "mariadb"),
-				strings.Contains(engine, "mysql"), strings.Contains(engine, "postgres"), strings.Contains(engine, "any"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 256000
-			case strings.Contains(engine, "oracle"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 256000
-			case strings.Contains(engine, "sql server"):
-				p.MaxThroughputMB = 1000
-				p.MaxIops = 64000
-			}
-		case volType == string(RDSDBStorageVolumeTypeIO2) && !strings.Contains(engine, "aurora"): // IO2 non-aurora
-			switch {
-			case strings.Contains(engine, "db2"), strings.Contains(engine, "mariadb"),
-				strings.Contains(engine, "mysql"), strings.Contains(engine, "postgres"), strings.Contains(engine, "any"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 256000
-			case strings.Contains(engine, "oracle"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 256000
-			case strings.Contains(engine, "sql server"):
-				p.MaxThroughputMB = 4000
-				p.MaxIops = 64000
-			}
-		case volType == string(RDSDBStorageVolumeTypeMagnetic) && !strings.Contains(engine, "aurora"): // Magnetic non-aurora
-			p.MaxIops = 1000
-			// Estimated: the docs give no throughput figure for magnetic volumes, and leaving it at 0 would make the type unsuggestable (though one could argue it should be).
-			p.MaxThroughputMB = 100
-		// The aurora cases are not covered by the docs, so they mirror the general purpose and io optimized caps above.
-		// Any inaccuracy matters little: aurora storage is managed, the only available switch is between general purpose and io optimized,
-		// and that choice is made purely on cost, since everything else is managed.
-		case volType == string(RDSDBStorageVolumeTypeGeneralPurposeAurora) && strings.Contains(engine, "aurora"): // General Purpose Aurora
-			p.MaxThroughputMB = 4000
-			p.MaxIops = 64000
-		case volType == string(RDSDBStorageVolumeTypeIOOptimizedAurora) && strings.Contains(engine, "aurora"): // IO Optimized Aurora
-			p.MaxThroughputMB = 4000
-			p.MaxIops = 256000
-		}
-	}
-}
-
-func (p *RDSDBStorage) DoIngest() bool {
-	if p.TermType != "OnDemand" ||
-		p.LocationType == "AWS Outposts" ||
-		p.VolumeType == "General Purpose (SSD)" ||
-		p.VolumeType == "Provisioned IOPS (SSD)" {
-		return false
-	}
-	//if (p.ProductFamily == "Database Storage" && p.VolumeType == "General Purpose-GP3" && p.MinVolumeSize == "") ||
-	//	(p.ProductFamily == "Database Storage" && p.VolumeType == "Provisioned IOPS-IO2" && p.MinVolumeSize == "") {
-	//	return false
-	//}
-	if p.LimitlessPreview == "Yes" {
-		return false
-	}
-
-	return true
-}
diff --git a/services/wastage/db/model/rds_products.go b/services/wastage/db/model/rds_products.go
deleted file mode 100644
index 3d26991a3..000000000
--- a/services/wastage/db/model/rds_products.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package model
-
-import (
-	"gorm.io/gorm"
-	"strconv"
-)
-
-type RDSProduct struct {
-	gorm.Model
-
-	DatabaseEngine  string  `gorm:"index;type:citext"`
-	DatabaseEdition string  `gorm:"index;type:citext"`
-	PricePerUnit    float64 `gorm:"index:price_idx,sort:asc"`
-
-	// Basic fields
-	SKU              string
-	OfferTermCode    string
-	RateCode         string
-	TermType         string
-	PriceDescription string
-	EffectiveDate    string
-	StartingRange    string
-	EndingRange
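
// A hedged sketch (not part of the patch above) of where DoIngest is meant to
// sit: filtering price-list rows before they are persisted. The save callback
// is a stand-in for the repo layer, not a real function in this package.
func ingestStorageRows(columns map[string]int, rows [][]string, save func(*RDSDBStorage) error) error {
	for _, row := range rows {
		var s RDSDBStorage
		s.PopulateFromMap(columns, row)
		// skip reserved terms, Outposts, legacy SSD volume names and Limitless previews
		if !s.DoIngest() {
			continue
		}
		if err := save(&s); err != nil {
			return err
		}
	}
	return nil
}
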
string - Unit string - PricePerUnitStr string - Currency string - RelatedTo string - ProductFamily string - serviceCode string - Location string - LocationType string - InstanceFamily string - vCPU string - Storage string - StorageMedia string - VolumeType string - MinVolumeSize string - MaxVolumeSize string - EngineCode string - LicenseModel string - DeploymentOption string - Group string - GroupDescription string - UsageType string - Operation string - ACU string - DeploymentModel string - EngineMajorVersion string - EngineMediaType string - ExtendedSupportPricingYear string - InstanceTypeFamily string - LimitlessPreview string - NormalizationSizeFactor string - RegionCode string - ServiceName string - VolumeName string -} - -func (p *RDSProduct) PopulateFromMap(columns map[string]int, row []string) { - for col, index := range columns { - switch col { - case "SKU": - p.SKU = row[index] - case "OfferTermCode": - p.OfferTermCode = row[index] - case "RateCode": - p.RateCode = row[index] - case "TermType": - p.TermType = row[index] - case "PriceDescription": - p.PriceDescription = row[index] - case "EffectiveDate": - p.EffectiveDate = row[index] - case "StartingRange": - p.StartingRange = row[index] - case "EndingRange": - p.EndingRange = row[index] - case "Unit": - p.Unit = row[index] - case "PricePerUnit": - p.PricePerUnit, _ = strconv.ParseFloat(row[index], 64) - p.PricePerUnitStr = row[index] - case "Currency": - p.Currency = row[index] - case "RelatedTo": - p.RelatedTo = row[index] - case "Product Family": - p.ProductFamily = row[index] - case "serviceCode": - p.serviceCode = row[index] - case "Location": - p.Location = row[index] - case "Location Type": - p.LocationType = row[index] - case "Instance Family": - p.InstanceFamily = row[index] - case "vCPU": - p.vCPU = row[index] - case "Storage": - p.Storage = row[index] - case "Storage Media": - p.StorageMedia = row[index] - case "Volume Type": - p.VolumeType = row[index] - case "Min Volume Size": - p.MinVolumeSize = row[index] - case "Max Volume Size": - p.MaxVolumeSize = row[index] - case "Engine Code": - p.EngineCode = row[index] - case "Database Engine": - p.DatabaseEngine = row[index] - case "Database Edition": - p.DatabaseEdition = row[index] - case "License Model": - p.LicenseModel = row[index] - case "Deployment Option": - p.DeploymentOption = row[index] - case "Group": - p.Group = row[index] - case "Group Description": - p.GroupDescription = row[index] - case "usageType": - p.UsageType = row[index] - case "operation": - p.Operation = row[index] - case "ACU": - p.ACU = row[index] - case "Deployment Model": - p.DeploymentModel = row[index] - case "Engine Major Version": - p.EngineMajorVersion = row[index] - case "Engine Media Type": - p.EngineMediaType = row[index] - case "Extended Support Pricing Year": - p.ExtendedSupportPricingYear = row[index] - case "Instance Type Family": - p.InstanceTypeFamily = row[index] - case "LimitlessPreview": - p.LimitlessPreview = row[index] - case "Normalization Size Factor": - p.NormalizationSizeFactor = row[index] - case "Region Code": - p.RegionCode = row[index] - case "serviceName": - p.ServiceName = row[index] - case "Volume Name": - p.VolumeName = row[index] - } - } -} diff --git a/services/wastage/db/model/uasge_v2.go b/services/wastage/db/model/uasge_v2.go deleted file mode 100644 index b591665ab..000000000 --- a/services/wastage/db/model/uasge_v2.go +++ /dev/null @@ -1,61 +0,0 @@ -package model - -import ( - "gorm.io/datatypes" - "gorm.io/gorm" - "time" -) - -type UsageV2 struct { - //We 
don't use gorm.Model since we need the indices on CreatedAt and UpdatedAt - ID uint `gorm:"primarykey"` - CreatedAt time.Time `gorm:"index"` - UpdatedAt time.Time `gorm:"index"` - DeletedAt gorm.DeletedAt `gorm:"index"` - - RequestId *string - ResponseId *string - ApiEndpoint string `gorm:"index"` - Request datatypes.JSON - Response datatypes.JSON - FailureMessage *string - Latency *float64 //Seconds - CliVersion *string - Statistics datatypes.JSON -} - -type Statistics struct { - AccountID string `json:"accountID"` - OrgEmail string `json:"orgEmail"` - ResourceID string `json:"resourceID"` - Auth0UserId string `json:"auth0UserId"` - - CurrentCost float64 `json:"currentCost"` - RecommendedCost float64 `json:"recommendedCost"` - Savings float64 `json:"savings"` - - EC2InstanceCurrentCost float64 `json:"ec2InstanceCurrentCost"` - EC2InstanceRecommendedCost float64 `json:"ec2InstanceRecommendedCost"` - EC2InstanceSavings float64 `json:"ec2InstanceSavings"` - - EBSCurrentCost float64 `json:"ebsCurrentCost"` - EBSRecommendedCost float64 `json:"ebsRecommendedCost"` - EBSSavings float64 `json:"ebsSavings"` - EBSVolumeCount int `json:"ebsVolumeCount"` - - RDSInstanceCurrentCost float64 `json:"rdsInstanceCurrentCost"` - RDSInstanceRecommendedCost float64 `json:"rdsInstanceRecommendedCost"` - RDSInstanceSavings float64 `json:"rdsInstanceSavings"` - - KubernetesCurrentCPURequest float64 `json:"kubernetesCurrentCPU"` - KubernetesRecommendedCPURequest float64 `json:"kubernetesRecommendedCPU"` - KubernetesCPURequestSavings float64 `json:"kubernetesCPUSavings"` - - KubernetesCurrentMemoryRequest float64 `json:"kubernetesCurrentMemoryRequest"` - KubernetesRecommendedMemoryRequest float64 `json:"kubernetesRecommendedMemoryRequest"` - KubernetesMemoryRequestSavings float64 `json:"KubernetesMemoryRequestSavings"` - - GCPComputeInstanceCurrentCost float64 `json:"gcpComputeInstanceCurrentCost"` - GCPComputeInstanceRecommendedCost float64 `json:"gcpComputeInstanceRecommendedCost"` - GCPComputeInstanceSavings float64 `json:"gcpComputeInstanceSavings"` -} diff --git a/services/wastage/db/model/usage.go b/services/wastage/db/model/usage.go deleted file mode 100644 index bfb883560..000000000 --- a/services/wastage/db/model/usage.go +++ /dev/null @@ -1,16 +0,0 @@ -package model - -import ( - "gorm.io/datatypes" - "gorm.io/gorm" -) - -type Usage struct { - gorm.Model - - Endpoint string - Request datatypes.JSON - Response datatypes.JSON - ResponseTime *float64 //Seconds - Moved bool -} diff --git a/services/wastage/db/model/user.go b/services/wastage/db/model/user.go deleted file mode 100644 index 846038bcd..000000000 --- a/services/wastage/db/model/user.go +++ /dev/null @@ -1,15 +0,0 @@ -package model - -import ( - "database/sql" - "time" -) - -type User struct { - UserId string `gorm:"primaryKey"` - PremiumUntil *time.Time - - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt sql.NullTime `gorm:"index"` -} diff --git a/services/wastage/db/repo/credential.go b/services/wastage/db/repo/credential.go deleted file mode 100644 index f036281b4..000000000 --- a/services/wastage/db/repo/credential.go +++ /dev/null @@ -1,63 +0,0 @@ -package repo - -import ( - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type CredentialRepo interface { - Create(m *model.Credential) error - Get(id uint) (*model.Credential, error) - Update(id uint, m model.Credential) error - Delete(id uint) error - List() 
([]model.Credential, error) -} - -type CredentialRepoImpl struct { - db *connector.Database -} - -func NewCredentialRepo(db *connector.Database) CredentialRepo { - return &CredentialRepoImpl{ - db: db, - } -} - -func (r *CredentialRepoImpl) Create(m *model.Credential) error { - return r.db.Conn().Create(&m).Error -} - -func (r *CredentialRepoImpl) Get(id uint) (*model.Credential, error) { - var m model.Credential - tx := r.db.Conn().Model(&model.Credential{}).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *CredentialRepoImpl) Update(id uint, m model.Credential) error { - return r.db.Conn().Model(&model.Credential{}).Where("id=?", id).Updates(&m).Error -} - -func (r *CredentialRepoImpl) Delete(id uint) error { - return r.db.Conn().Unscoped().Delete(&model.Credential{ - Model: gorm.Model{ - ID: id, - }, - }).Error -} - -func (r *CredentialRepoImpl) List() ([]model.Credential, error) { - var ms []model.Credential - tx := r.db.Conn().Model(&model.Credential{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} diff --git a/services/wastage/db/repo/data_age.go b/services/wastage/db/repo/data_age.go deleted file mode 100644 index c29e64f53..000000000 --- a/services/wastage/db/repo/data_age.go +++ /dev/null @@ -1,59 +0,0 @@ -package repo - -import ( - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type DataAgeRepo interface { - Create(m *model.DataAge) error - Get(dataType string) (*model.DataAge, error) - Update(dataType string, m model.DataAge) error - Delete(dataType string) error - List() ([]model.DataAge, error) -} - -type DataAgeRepoImpl struct { - db *connector.Database -} - -func NewDataAgeRepo(db *connector.Database) DataAgeRepo { - return &DataAgeRepoImpl{ - db: db, - } -} - -func (r *DataAgeRepoImpl) Create(m *model.DataAge) error { - return r.db.Conn().Create(&m).Error -} - -func (r *DataAgeRepoImpl) Get(dataType string) (*model.DataAge, error) { - var m model.DataAge - tx := r.db.Conn().Model(&model.DataAge{}).Where("data_type=?", dataType).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *DataAgeRepoImpl) Update(dataType string, m model.DataAge) error { - return r.db.Conn().Model(&model.DataAge{}).Where("data_type=?", dataType).Updates(&m).Error -} - -func (r *DataAgeRepoImpl) Delete(dataType string) error { - return r.db.Conn().Unscoped().Delete(&model.DataAge{DataType: dataType}).Error -} - -func (r *DataAgeRepoImpl) List() ([]model.DataAge, error) { - var ms []model.DataAge - tx := r.db.Conn().Model(&model.DataAge{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} diff --git a/services/wastage/db/repo/ebs_volume_types.go b/services/wastage/db/repo/ebs_volume_types.go deleted file mode 100644 index 742637969..000000000 --- a/services/wastage/db/repo/ebs_volume_types.go +++ /dev/null @@ -1,478 +0,0 @@ -package repo - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "math" - "time" -) - -type EBSVolumeTypeRepo interface { - 
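
// A hedged caller-side sketch (not part of the patch above): the Get methods
// in these repos translate gorm.ErrRecordNotFound into (nil, nil), so callers
// must test the pointer as well as the error. lookupCredential is hypothetical
// and assumes the fmt import.
func lookupCredential(repo CredentialRepo, id uint) (*model.Credential, error) {
	cred, err := repo.Get(id)
	if err != nil {
		return nil, err // a real database failure
	}
	if cred == nil {
		return nil, fmt.Errorf("credential %d not found", id) // absent row, not an error from the repo
	}
	return cred, nil
}
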
Create(tableName string, tx *gorm.DB, m *model.EBSVolumeType) error - Get(id uint) (*model.EBSVolumeType, error) - Update(tableName string, id uint, m model.EBSVolumeType) error - Delete(tableName string, id uint) error - List() ([]model.EBSVolumeType, error) - Truncate(tx *gorm.DB) error - GetCheapestTypeWithSpecs(ctx context.Context, region string, volumeSize int32, iops int32, throughput float64, validTypes []types.VolumeType) (types.VolumeType, int32, int32, float64, string, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(tableName string) error - CreateNewTable() (string, error) -} - -type EBSVolumeTypeRepoImpl struct { - db *connector.Database - - viewName string -} - -func NewEBSVolumeTypeRepo(db *connector.Database) EBSVolumeTypeRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.EBSVolumeType{}) - - return &EBSVolumeTypeRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *EBSVolumeTypeRepoImpl) Create(tableName string, tx *gorm.DB, m *model.EBSVolumeType) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *EBSVolumeTypeRepoImpl) Get(id uint) (*model.EBSVolumeType, error) { - var m model.EBSVolumeType - tx := r.db.Conn().Table(r.viewName).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *EBSVolumeTypeRepoImpl) Update(tableName string, id uint, m model.EBSVolumeType) error { - return r.db.Conn().Table(tableName).Where("id=?", id).Updates(&m).Error -} - -func (r *EBSVolumeTypeRepoImpl) Delete(tableName string, id uint) error { - return r.db.Conn().Unscoped().Table(tableName).Delete(&model.EBSVolumeType{}, id).Error -} - -func (r *EBSVolumeTypeRepoImpl) List() ([]model.EBSVolumeType, error) { - var ms []model.EBSVolumeType - tx := r.db.Conn().Table(r.viewName).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *EBSVolumeTypeRepoImpl) Truncate(tx *gorm.DB) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Unscoped().Where("1 = 1").Delete(&model.EBSVolumeType{}) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *EBSVolumeTypeRepoImpl) getDimensionCostsByRegionVolumeTypeAndChargeType(ctx context.Context, regionCode string, volumeType types.VolumeType, chargeType model.EBSVolumeChargeType) ([]model.EBSVolumeType, error) { - var m []model.EBSVolumeType - tx := r.db.Conn().Table(r.viewName). - Where("region_code = ?", regionCode). - Where("volume_type = ?", volumeType). - Where("charge_type = ?", chargeType). 
- Find(&m)
-	if tx.Error != nil {
-		if errors.Is(tx.Error, gorm.ErrRecordNotFound) {
-			return nil, nil
-		}
-		return nil, tx.Error
-	}
-	return m, nil
-}
-
-func (r *EBSVolumeTypeRepoImpl) getIo1TotalPrice(ctx context.Context, region string, volumeSize int32, iops int32) (float64, string, error) {
-	io1IopsPrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeIo1, model.ChargeTypeIOPS)
-	if err != nil {
-		return 0, "", err
-	}
-	io1Iops := 0.0
-	for _, p := range io1IopsPrices { // take the first price row
-		io1Iops = p.PricePerUnit
-		break
-	}
-	io1SizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeIo1, model.ChargeTypeSize)
-	if err != nil {
-		return 0, "", err
-	}
-	io1Size := 0.0
-	for _, sizes := range io1SizePrices {
-		io1Size = sizes.PricePerUnit
-		break
-	}
-	io1Price := io1Iops*float64(iops) + io1Size*float64(volumeSize)
-	costBreakdown := fmt.Sprintf("Provisioned IOPS: $%.2f * %d + Size: $%.2f * %d", io1Iops, iops, io1Size, volumeSize)
-
-	return io1Price, costBreakdown, nil
-}
-
-func (r *EBSVolumeTypeRepoImpl) getIo2TotalPrice(ctx context.Context, region string, volumeSize int32, iops int32) (float64, string, error) {
-	io2IopsPrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeIo2, model.ChargeTypeIOPS)
-	if err != nil {
-		return 0, "", err
-	}
-	io2IopsTier1 := 0.0
-	io2IopsTier2 := 0.0
-	io2IopsTier3 := 0.0
-	for _, p := range io2IopsPrices {
-		switch p.PriceGroup {
-		case "EBS IOPS":
-			io2IopsTier1 = p.PricePerUnit
-		case "EBS IOPS Tier 2":
-			io2IopsTier2 = p.PricePerUnit
-		case "EBS IOPS Tier 3":
-			io2IopsTier3 = p.PricePerUnit
-		}
-	}
-	io2SizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeIo2, model.ChargeTypeSize)
-	if err != nil {
-		return 0, "", err
-	}
-	io2Size := 0.0
-	for _, sizes := range io2SizePrices {
-		io2Size = sizes.PricePerUnit
-		break
-	}
-	io2Price := io2Size * float64(volumeSize)
-	costBreakdown := fmt.Sprintf("Size: $%.2f * %d", io2Size, volumeSize)
-	if iops > model.Io2ProvisionedIopsTier2UpperBound {
-		tier3Iops := iops - model.Io2ProvisionedIopsTier2UpperBound // tier delta, taken before iops is capped for the next tier
-		io2Price += io2IopsTier3 * float64(tier3Iops)
-		iops = model.Io2ProvisionedIopsTier2UpperBound
-		costBreakdown += fmt.Sprintf(" + IOPS Tier 3 (over %d): $%.2f * %d", model.Io2ProvisionedIopsTier2UpperBound, io2IopsTier3, tier3Iops)
-	}
-	if iops > model.Io2ProvisionedIopsTier1UpperBound {
-		tier2Iops := iops - model.Io2ProvisionedIopsTier1UpperBound
-		io2Price += io2IopsTier2 * float64(tier2Iops)
-		iops = model.Io2ProvisionedIopsTier1UpperBound
-		costBreakdown += fmt.Sprintf(" + IOPS Tier 2 (over %d under %d): $%.2f * %d", model.Io2ProvisionedIopsTier1UpperBound, model.Io2ProvisionedIopsTier2UpperBound, io2IopsTier2, tier2Iops)
-	}
-	io2Price += io2IopsTier1 * float64(iops)
-	costBreakdown += fmt.Sprintf(" + IOPS Tier 1 (under %d): $%.2f * %d", model.Io2ProvisionedIopsTier1UpperBound, io2IopsTier1, iops)
-
-	return io2Price, costBreakdown, nil
-}
-
-func (r *EBSVolumeTypeRepoImpl) getGp2TotalPrice(ctx context.Context, region string, volumeSize *int32, iops int32) (float64, string, error) {
-	gp2Prices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeGp2, model.ChargeTypeSize)
-	if err != nil {
-		return 0, "", err
-	}
-	gp2Price := 0.0
-	for _, gp2 := range gp2Prices {
-		gp2Price = gp2.PricePerUnit
-		break
-	}
-
-	// gp2 earns model.Gp2IopsPerGiB IOPS per GiB above the 100-IOPS floor, so a higher IOPS target forces a larger volume
-	if iops > 100 {
-		minSizeReq := int32(math.Ceil(float64(iops) /
model.Gp2IopsPerGiB)) - if minSizeReq > *volumeSize { - *volumeSize = minSizeReq - } - } - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", gp2Price, *volumeSize) - - return gp2Price * float64(*volumeSize), costBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) getGp3TotalPrice(ctx context.Context, region string, volumeSize int32, iops int32, throughput float64) (float64, string, error) { - iops = max(iops-model.Gp3BaseIops, 0) - throughput = max(throughput-model.Gp3BaseThroughput, 0.0) - - gp3SizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeGp3, model.ChargeTypeSize) - if err != nil { - return 0, "", err - } - gp3SizePrice := 0.0 - for _, gp3 := range gp3SizePrices { - gp3SizePrice = gp3.PricePerUnit - break - } - gp3IopsPrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeGp3, model.ChargeTypeIOPS) - if err != nil { - return 0, "", err - } - gp3IopsPrice := 0.0 - for _, gp3 := range gp3IopsPrices { - gp3IopsPrice = gp3.PricePerUnit - break - } - - gp3ThroughputPrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeGp3, model.ChargeTypeThroughput) - if err != nil { - return 0, "", err - } - gp3ThroughputPrice := 0.0 - for _, gp3 := range gp3ThroughputPrices { - gp3ThroughputPrice = gp3.PricePerUnit - break - } - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", gp3SizePrice, volumeSize) - if iops > 0 { - costBreakdown += fmt.Sprintf(" + Provisioned IOPS (over %d): $%.2f * %d", model.Gp3BaseIops, gp3IopsPrice, iops) - } - if throughput > 0 { - costBreakdown += fmt.Sprintf(" + Provisioned Throughput (over %d): $%.2f * %.2f", model.Gp3BaseThroughput, gp3ThroughputPrice, throughput) - } - - return gp3SizePrice*float64(volumeSize) + gp3IopsPrice*float64(iops) + gp3ThroughputPrice*throughput, costBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) getSc1TotalPrice(ctx context.Context, region string, volumeSize int32) (float64, string, error) { - sc1SizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeSc1, model.ChargeTypeSize) - if err != nil { - return 0, "", err - } - sc1SizePrice := 0.0 - for _, sc1 := range sc1SizePrices { - sc1SizePrice = sc1.PricePerUnit - break - } - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", sc1SizePrice, volumeSize) - - return sc1SizePrice * float64(volumeSize), costBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) getSt1TotalPrice(ctx context.Context, region string, volumeSize int32) (float64, string, error) { - st1SizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeSt1, model.ChargeTypeSize) - if err != nil { - return 0, "", err - } - st1SizePrice := 0.0 - for _, st1 := range st1SizePrices { - st1SizePrice = st1.PricePerUnit - break - } - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", st1SizePrice, volumeSize) - - return st1SizePrice * float64(volumeSize), costBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) getStandardTotalPrice(ctx context.Context, region string, volumeSize int32) (float64, string, error) { - standardSizePrices, err := r.getDimensionCostsByRegionVolumeTypeAndChargeType(ctx, region, types.VolumeTypeStandard, model.ChargeTypeSize) - if err != nil { - return 0, "", err - } - standardSizePrice := 0.0 - for _, standard := range standardSizePrices { - standardSizePrice = standard.PricePerUnit - break - } - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", standardSizePrice, volumeSize) - - return 
standardSizePrice * float64(volumeSize), costBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) getFeasibleVolumeTypes(ctx context.Context, region string, volumeSize int32, iops int32, throughput float64, validTypes []types.VolumeType) ([]model.EBSVolumeType, error) { - var res []model.EBSVolumeType - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("region_code = ?", region). - Where("max_iops >= ?", iops). - Where("max_throughput >= ?", throughput). - Where("max_size >= ?", volumeSize) - if len(validTypes) > 0 { - tx = tx.Where("volume_type IN ?", validTypes) - } - tx = tx.Find(&res) - if tx.Error != nil { - return nil, tx.Error - } - return res, nil -} - -func (r *EBSVolumeTypeRepoImpl) GetCheapestTypeWithSpecs(ctx context.Context, region string, volumeSize int32, iops int32, throughput float64, validTypes []types.VolumeType) (types.VolumeType, int32, int32, float64, string, error) { - volumeTypes, err := r.getFeasibleVolumeTypes(ctx, region, volumeSize, iops, throughput, validTypes) - if err != nil { - return "", 0, 0, 0, "", err - } - - if len(volumeTypes) == 0 { - return "", 0, 0, 0, "", errors.New("no feasible volume types found") - } - - minPrice := 0.0 - resVolumeType := "" - rescostBreakdown := "" - resBaselineIOPS := int32(0) - resBaselineThroughput := 0.0 - resVolumeSize := volumeSize - for _, vt := range volumeTypes { - var price float64 - var costBreakdown string - var volIops int32 - var volThroughput float64 - var volSize int32 = volumeSize - switch vt.VolumeType { - case types.VolumeTypeIo1: - price, costBreakdown, err = r.getIo1TotalPrice(ctx, region, volSize, iops) - volIops = 0 - volThroughput = float64(vt.MaxThroughput) - case types.VolumeTypeIo2: - price, costBreakdown, err = r.getIo2TotalPrice(ctx, region, volSize, iops) - volIops = 0 - volThroughput = float64(vt.MaxThroughput) - case types.VolumeTypeGp2: - price, costBreakdown, err = r.getGp2TotalPrice(ctx, region, &volSize, iops) - volIops = vt.MaxIops - volThroughput = float64(vt.MaxThroughput) - case types.VolumeTypeGp3: - price, costBreakdown, err = r.getGp3TotalPrice(ctx, region, volSize, iops, throughput) - volIops = model.Gp3BaseIops - volThroughput = model.Gp3BaseThroughput - case types.VolumeTypeSc1: - price, costBreakdown, err = r.getSc1TotalPrice(ctx, region, volSize) - volIops = vt.MaxIops - volThroughput = float64(vt.MaxThroughput) - case types.VolumeTypeSt1: - price, costBreakdown, err = r.getSt1TotalPrice(ctx, region, volSize) - volIops = vt.MaxIops - volThroughput = float64(vt.MaxThroughput) - case types.VolumeTypeStandard: - price, costBreakdown, err = r.getStandardTotalPrice(ctx, region, volSize) - volIops = vt.MaxIops - volThroughput = float64(vt.MaxThroughput) - } - if err != nil { - return "", 0, 0, 0, "", err - } - if resVolumeType == "" || price < minPrice { - minPrice = price - resVolumeType = string(vt.VolumeType) - resBaselineIOPS = volIops - resBaselineThroughput = volThroughput - resVolumeSize = volSize - rescostBreakdown = costBreakdown - } - } - - return types.VolumeType(resVolumeType), resVolumeSize, resBaselineIOPS, resBaselineThroughput, rescostBreakdown, nil -} - -func (r *EBSVolumeTypeRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var ec2InstanceTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - ec2InstanceTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - 
FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, ec2InstanceTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(ec2InstanceTypeTable).AutoMigrate(&model.EBSVolumeType{}) - if err != nil { - return "", err - } - return ec2InstanceTypeTable, nil -} - -func (r *EBSVolumeTypeRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *EBSVolumeTypeRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *EBSVolumeTypeRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/ec2_instance_types.go b/services/wastage/db/repo/ec2_instance_types.go deleted file mode 100644 index a4e6446bf..000000000 --- a/services/wastage/db/repo/ec2_instance_types.go +++ /dev/null @@ -1,255 +0,0 @@ -package repo - -import ( - "context" - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type EC2InstanceTypeRepo interface { - Create(tableName string, tx *gorm.DB, m *model.EC2InstanceType) error - Get(id uint) (*model.EC2InstanceType, error) - Update(tableName string, id uint, m model.EC2InstanceType) error - UpdateExtrasByRegionAndType(tableName string, tx *gorm.DB, region, instanceType string, extras map[string]any) error - UpdateNullExtrasByType(tableName string, tx *gorm.DB, instanceType string, extras map[string]any) error - Delete(tableName string, id uint) error - List() ([]model.EC2InstanceType, error) - GetCheapestByCoreAndNetwork(ctx context.Context, bandwidth float64, pref map[string]interface{}) (*model.EC2InstanceType, error) - Truncate(tx *gorm.DB) error - ListByInstanceType(ctx context.Context, instanceType, operation, region string) ([]model.EC2InstanceType, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(tableName string) error - CreateNewTable() (string, error) -} - -type EC2InstanceTypeRepoImpl struct { - db *connector.Database - - viewName string -} - -func NewEC2InstanceTypeRepo(db *connector.Database) EC2InstanceTypeRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.EC2InstanceType{}) - - return &EC2InstanceTypeRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *EC2InstanceTypeRepoImpl) 
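
// A hedged sketch (not part of the patch above) of the refresh lifecycle these
// repos implement: ingest into a fresh timestamped table, atomically repoint
// the stable view at it, then drop superseded tables. refreshEC2Prices and the
// load callback are illustrative only.
func refreshEC2Prices(repo EC2InstanceTypeRepo, load func(table string) error) error {
	table, err := repo.CreateNewTable() // e.g. ec2_instance_types_2024_11_15_<id>
	if err != nil {
		return err
	}
	if err := load(table); err != nil { // bulk-insert rows via repo.Create(table, tx, ...)
		return err
	}
	if err := repo.MoveViewTransaction(table); err != nil { // the view now reads the new table
		return err
	}
	return repo.RemoveOldTables(table) // drop older generations of the table
}
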
Create(tableName string, tx *gorm.DB, m *model.EC2InstanceType) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *EC2InstanceTypeRepoImpl) Get(id uint) (*model.EC2InstanceType, error) { - var m model.EC2InstanceType - tx := r.db.Conn().Table(r.viewName).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *EC2InstanceTypeRepoImpl) GetCheapestByCoreAndNetwork(ctx context.Context, bandwidth float64, pref map[string]interface{}) (*model.EC2InstanceType, error) { - var m model.EC2InstanceType - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("network_max_bandwidth >= ?", bandwidth). - Where("capacity_status = 'Used'"). - Where("price_per_unit != 0") - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("price_per_unit ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *EC2InstanceTypeRepoImpl) Update(tableName string, id uint, m model.EC2InstanceType) error { - return r.db.Conn().Table(tableName).Where("id=?", id).Updates(&m).Error -} - -func (r *EC2InstanceTypeRepoImpl) Delete(tableName string, id uint) error { - return r.db.Conn().Unscoped().Table(tableName).Delete(&model.EC2InstanceType{}, id).Error -} - -func (r *EC2InstanceTypeRepoImpl) List() ([]model.EC2InstanceType, error) { - var ms []model.EC2InstanceType - tx := r.db.Conn().Table(r.viewName).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *EC2InstanceTypeRepoImpl) UpdateExtrasByRegionAndType(tableName string, tx *gorm.DB, region, instanceType string, extras map[string]any) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName). - Where("region_code = ?", region). - Where("instance_type = ?", instanceType). - Updates(extras) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *EC2InstanceTypeRepoImpl) UpdateNullExtrasByType(tableName string, tx *gorm.DB, instanceType string, extras map[string]any) error { - if tx == nil { - tx = r.db.Conn() - } - for k, v := range extras { - tx = tx.Table(tableName). - Where("instance_type = ?", instanceType). - Where(k+" IS NULL"). - Update(k, v) - if tx.Error != nil { - return tx.Error - } - } - return nil -} - -func (r *EC2InstanceTypeRepoImpl) ListByInstanceType(ctx context.Context, instanceType, operation, region string) ([]model.EC2InstanceType, error) { - var ms []model.EC2InstanceType - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("instance_type = ? AND capacity_status = 'Used'", instanceType). - Where("region_code = ?", region). - Where("operation = ?", operation). 
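
// A hedged usage sketch (not part of the patch above) for the pref map taken
// by GetCheapestByCoreAndNetwork: each key is a gorm WHERE fragment applied on
// top of the bandwidth, capacity-status and price filters. The column names
// below are plausible guesses from the model's fields, not verified SQL.
func cheapestArmInstance(repo EC2InstanceTypeRepo) (*model.EC2InstanceType, error) {
	pref := map[string]interface{}{
		"v_cpu >= ?":                  4.0,
		"memory_gb >= ?":              16.0,
		"physical_processor_arch = ?": "arm64",
		"region_code = ?":             "us-east-1",
	}
	// 1 Gbps baseline, expressed in bytes/s as elsewhere in this package
	return repo.GetCheapestByCoreAndNetwork(context.Background(), 1024*1024*1024/8, pref)
}
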
- Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *EC2InstanceTypeRepoImpl) Truncate(tx *gorm.DB) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Unscoped().Where("1 = 1").Delete(&model.EC2InstanceType{}) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *EC2InstanceTypeRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var ec2InstanceTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - ec2InstanceTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, ec2InstanceTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(ec2InstanceTypeTable).AutoMigrate(&model.EC2InstanceType{}) - if err != nil { - return "", err - } - return ec2InstanceTypeTable, nil -} - -func (r *EC2InstanceTypeRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *EC2InstanceTypeRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *EC2InstanceTypeRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/gcp_compute_machinetype.go b/services/wastage/db/repo/gcp_compute_machinetype.go deleted file mode 100644 index 65b29a396..000000000 --- a/services/wastage/db/repo/gcp_compute_machinetype.go +++ /dev/null @@ -1,182 +0,0 @@ -package repo - -import ( - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type GCPComputeMachineTypeRepo interface { - Create(tableName string, tx *gorm.DB, m *model.GCPComputeMachineType) error - Delete(tableName string, id string) error - List() ([]model.GCPComputeMachineType, error) - Get(machineType string) (*model.GCPComputeMachineType, error) - GetCheapestByCoreAndMemory(cpu, memory float64, pref map[string]interface{}) (*model.GCPComputeMachineType, error) - CreateNewTable() (string, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(currentTableName string) error -} - -type GCPComputeMachineTypeRepoImpl 
struct { - db *connector.Database - - viewName string -} - -func NewGCPComputeMachineTypeRepo(db *connector.Database) GCPComputeMachineTypeRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.GCPComputeMachineType{}) - - return &GCPComputeMachineTypeRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *GCPComputeMachineTypeRepoImpl) Create(tableName string, tx *gorm.DB, m *model.GCPComputeMachineType) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *GCPComputeMachineTypeRepoImpl) Delete(tableName string, sku string) error { - return r.db.Conn().Table(tableName).Where("sku=?", sku).Delete(&model.GCPComputeMachineType{}).Error -} - -func (r *GCPComputeMachineTypeRepoImpl) List() ([]model.GCPComputeMachineType, error) { - var m []model.GCPComputeMachineType - tx := r.db.Conn().Table(r.viewName).Find(&m) - return m, tx.Error -} - -func (r *GCPComputeMachineTypeRepoImpl) Get(machineType string) (*model.GCPComputeMachineType, error) { - var m model.GCPComputeMachineType - tx := r.db.Conn().Table(r.viewName).Where("machine_type=?", machineType).First(&m) - return &m, tx.Error -} - -func (r *GCPComputeMachineTypeRepoImpl) GetCheapestByCoreAndMemory(cpu, memory float64, pref map[string]interface{}) (*model.GCPComputeMachineType, error) { - var m model.GCPComputeMachineType - tx := r.db.Conn().Table(r.viewName). - Where("guest_cpus >= ?", cpu). - Where("memory_mb >= ?", memory). - Where("unit_price != 0") - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("unit_price ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *GCPComputeMachineTypeRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var gcpComputeMachineTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - gcpComputeMachineTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, gcpComputeMachineTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(gcpComputeMachineTypeTable).AutoMigrate(&model.GCPComputeMachineType{}) - if err != nil { - return "", err - } - return gcpComputeMachineTypeTable, nil -} - -func (r *GCPComputeMachineTypeRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *GCPComputeMachineTypeRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := 
r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *GCPComputeMachineTypeRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/gcp_compute_sku.go b/services/wastage/db/repo/gcp_compute_sku.go deleted file mode 100644 index 7281a5969..000000000 --- a/services/wastage/db/repo/gcp_compute_sku.go +++ /dev/null @@ -1,199 +0,0 @@ -package repo - -import ( - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type GCPComputeSKURepo interface { - Create(tableName string, tx *gorm.DB, m *model.GCPComputeSKU) error - Delete(tableName string, id string) error - List() ([]model.GCPComputeSKU, error) - GetCheapestCustomCore(machineFamily string, pref map[string]interface{}) (*model.GCPComputeSKU, error) - GetCheapestCustomRam(machineFamily string, pref map[string]interface{}) (*model.GCPComputeSKU, error) - CreateNewTable() (string, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(currentTableName string) error -} - -type GCPComputeSKURepoImpl struct { - db *connector.Database - - viewName string -} - -func NewGCPComputeSKURepo(db *connector.Database) GCPComputeSKURepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.GCPComputeSKU{}) - - return &GCPComputeSKURepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *GCPComputeSKURepoImpl) Create(tableName string, tx *gorm.DB, m *model.GCPComputeSKU) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *GCPComputeSKURepoImpl) Delete(tableName string, sku string) error { - return r.db.Conn().Table(tableName).Where("sku=?", sku).Delete(&model.GCPComputeSKU{}).Error -} - -func (r *GCPComputeSKURepoImpl) List() ([]model.GCPComputeSKU, error) { - var m []model.GCPComputeSKU - tx := r.db.Conn().Table(r.viewName).Find(&m) - return m, tx.Error -} - -func (r *GCPComputeSKURepoImpl) GetCheapestCustomCore(machineFamily string, pref map[string]interface{}) (*model.GCPComputeSKU, error) { - var m model.GCPComputeSKU - tx := r.db.Conn().Table(r.viewName). - Where("resource_family = 'Compute'"). - Where("type = 'Custom'"). - Where("unit_price != 0"). - Where("machine_family = ?", machineFamily). - Where("resource_group = 'CPU'") - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("unit_price ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *GCPComputeSKURepoImpl) GetCheapestCustomRam(machineFamily string, pref map[string]interface{}) (*model.GCPComputeSKU, error) { - var m model.GCPComputeSKU - tx := r.db.Conn().Table(r.viewName). - Where("resource_family = 'Compute'"). - Where("type = 'Custom'"). - Where("unit_price != 0"). - Where("machine_family = ?", machineFamily). 
- Where("resource_group = 'RAM'") - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("unit_price ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *GCPComputeSKURepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var gcpComputeSKUTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - gcpComputeSKUTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, gcpComputeSKUTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(gcpComputeSKUTable).AutoMigrate(&model.GCPComputeSKU{}) - if err != nil { - return "", err - } - return gcpComputeSKUTable, nil -} - -func (r *GCPComputeSKURepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *GCPComputeSKURepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *GCPComputeSKURepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/gcp_compute_storagetype.go b/services/wastage/db/repo/gcp_compute_storagetype.go deleted file mode 100644 index 12b65363d..000000000 --- a/services/wastage/db/repo/gcp_compute_storagetype.go +++ /dev/null @@ -1,179 +0,0 @@ -package repo - -import ( - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type GCPComputeDiskTypeRepo interface { - Create(tableName string, tx *gorm.DB, m *model.GCPComputeDiskType) error - Delete(tableName string, id string) error - List() ([]model.GCPComputeDiskType, error) - Get(machineType string) (*model.GCPComputeDiskType, error) - GetCheapest(pref map[string]interface{}) (*model.GCPComputeDiskType, error) - CreateNewTable() (string, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(currentTableName string) error -} - -type GCPComputeDiskTypeRepoImpl struct { - db *connector.Database - - viewName string -} - -func NewGCPComputeDiskTypeRepo(db *connector.Database) 
GCPComputeDiskTypeRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.GCPComputeDiskType{}) - - return &GCPComputeDiskTypeRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *GCPComputeDiskTypeRepoImpl) Create(tableName string, tx *gorm.DB, m *model.GCPComputeDiskType) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *GCPComputeDiskTypeRepoImpl) Delete(tableName string, sku string) error { - return r.db.Conn().Table(tableName).Where("sku=?", sku).Delete(&model.GCPComputeDiskType{}).Error -} - -func (r *GCPComputeDiskTypeRepoImpl) List() ([]model.GCPComputeDiskType, error) { - var m []model.GCPComputeDiskType - tx := r.db.Conn().Table(r.viewName).Find(&m) - return m, tx.Error -} - -func (r *GCPComputeDiskTypeRepoImpl) Get(machineType string) (*model.GCPComputeDiskType, error) { - var m model.GCPComputeDiskType - tx := r.db.Conn().Table(r.viewName).Where("machine_type=?", machineType).First(&m) - return &m, tx.Error -} - -func (r *GCPComputeDiskTypeRepoImpl) GetCheapest(pref map[string]interface{}) (*model.GCPComputeDiskType, error) { - var m model.GCPComputeDiskType - tx := r.db.Conn().Table(r.viewName) - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("unit_price ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *GCPComputeDiskTypeRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var gcpComputeMachineTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - gcpComputeMachineTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, gcpComputeMachineTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(gcpComputeMachineTypeTable).AutoMigrate(&model.GCPComputeDiskType{}) - if err != nil { - return "", err - } - return gcpComputeMachineTypeTable, nil -} - -func (r *GCPComputeDiskTypeRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *GCPComputeDiskTypeRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *GCPComputeDiskTypeRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := 
range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/organization.go b/services/wastage/db/repo/organization.go deleted file mode 100644 index 794e48df1..000000000 --- a/services/wastage/db/repo/organization.go +++ /dev/null @@ -1,60 +0,0 @@ -package repo - -import ( - "context" - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type OrganizationRepo interface { - Create(m *model.Organization) error - Update(id string, m *model.Organization) error - Delete(id string) error - List() ([]model.Organization, error) - Get(ctx context.Context, id string) (*model.Organization, error) -} - -type OrganizationRepoImpl struct { - db *connector.Database -} - -func NewOrganizationRepo(db *connector.Database) OrganizationRepo { - return &OrganizationRepoImpl{ - db: db, - } -} - -func (r *OrganizationRepoImpl) Create(m *model.Organization) error { - return r.db.Conn().Create(&m).Error -} - -func (r *OrganizationRepoImpl) Update(id string, m *model.Organization) error { - return r.db.Conn().Model(&model.Organization{}).Where("organization_id=?", id).Updates(&m).Error -} - -func (r *OrganizationRepoImpl) Delete(id string) error { - return r.db.Conn().Model(&model.Organization{}).Where("organization_id=?", id).Delete(&model.Organization{}).Error -} - -func (r *OrganizationRepoImpl) List() ([]model.Organization, error) { - var ms []model.Organization - tx := r.db.Conn().Model(&model.Organization{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *OrganizationRepoImpl) Get(ctx context.Context, id string) (*model.Organization, error) { - var m model.Organization - tx := r.db.Conn().WithContext(ctx).Model(&model.Organization{}).Where("organization_id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} diff --git a/services/wastage/db/repo/preprocess_job.go b/services/wastage/db/repo/preprocess_job.go deleted file mode 100644 index 43537e09e..000000000 --- a/services/wastage/db/repo/preprocess_job.go +++ /dev/null @@ -1,63 +0,0 @@ -package repo - -import ( - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type PreProcessJobRepo interface { - Create(m *model.PreProcessJob) error - Get(id uint) (*model.PreProcessJob, error) - Update(id uint, m model.PreProcessJob) error - Delete(id uint) error - List() ([]model.PreProcessJob, error) -} - -type PreProcessJobRepoImpl struct { - db *connector.Database -} - -func NewPreProcessJobRepo(db *connector.Database) PreProcessJobRepo { - return &PreProcessJobRepoImpl{ - db: db, - } -} - -func (r *PreProcessJobRepoImpl) Create(m *model.PreProcessJob) error { - return r.db.Conn().Create(&m).Error -} - -func (r *PreProcessJobRepoImpl) Get(id uint) (*model.PreProcessJob, error) { - var m model.PreProcessJob - tx := r.db.Conn().Model(&model.PreProcessJob{}).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *PreProcessJobRepoImpl) Update(id uint, m model.PreProcessJob) error { - return r.db.Conn().Model(&model.PreProcessJob{}).Where("id=?", id).Updates(&m).Error 
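// A minimal, self-contained sketch (hypothetical Status field, placeholder
// DSN) of the GORM partial-update semantics the Update methods in these
// deleted repos rely on: the struct form of Updates skips zero-value fields,
// so deliberately clearing a column requires the map form.
package main

import (
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

type PreProcessJob struct {
	gorm.Model
	Status string // hypothetical column, for illustration only
}

func main() {
	db, err := gorm.Open(postgres.Open("host=localhost dbname=wastage"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	// Struct form: zero-value fields (here an empty Status) are NOT written.
	db.Model(&PreProcessJob{}).Where("id = ?", 1).Updates(PreProcessJob{Status: "done"})
	// Map form: writes the zero value explicitly.
	db.Model(&PreProcessJob{}).Where("id = ?", 1).Updates(map[string]any{"status": ""})
}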
-} - -func (r *PreProcessJobRepoImpl) Delete(id uint) error { - return r.db.Conn().Unscoped().Delete(&model.PreProcessJob{ - Model: gorm.Model{ - ID: id, - }, - }).Error -} - -func (r *PreProcessJobRepoImpl) List() ([]model.PreProcessJob, error) { - var ms []model.PreProcessJob - tx := r.db.Conn().Model(&model.PreProcessJob{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} diff --git a/services/wastage/db/repo/rds_db_instance.go b/services/wastage/db/repo/rds_db_instance.go deleted file mode 100644 index bb22da905..000000000 --- a/services/wastage/db/repo/rds_db_instance.go +++ /dev/null @@ -1,254 +0,0 @@ -package repo - -import ( - "context" - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type RDSDBInstanceRepo interface { - Create(tableName string, tx *gorm.DB, m *model.RDSDBInstance) error - Get(id uint) (*model.RDSDBInstance, error) - Update(id uint, m model.RDSDBInstance) error - Delete(id uint) error - List() ([]model.RDSDBInstance, error) - Truncate(tx *gorm.DB) error - UpdateNilEBSThroughput(tx *gorm.DB, tableName string) error - ListByInstanceType(ctx context.Context, region, instanceType, engine, engineEdition, clusterType string) ([]model.RDSDBInstance, error) - GetCheapestByPref(ctx context.Context, pref map[string]any) (*model.RDSDBInstance, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(tableName string) error - CreateNewTable() (string, error) -} - -type RDSDBInstanceRepoImpl struct { - db *connector.Database - - viewName string -} - -func NewRDSDBInstanceRepo(db *connector.Database) RDSDBInstanceRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.RDSDBInstance{}) - - return &RDSDBInstanceRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *RDSDBInstanceRepoImpl) Create(tableName string, tx *gorm.DB, m *model.RDSDBInstance) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *RDSDBInstanceRepoImpl) Get(id uint) (*model.RDSDBInstance, error) { - var m model.RDSDBInstance - tx := r.db.Conn().Table(r.viewName).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *RDSDBInstanceRepoImpl) Update(id uint, m model.RDSDBInstance) error { - return r.db.Conn().Table(r.viewName).Where("id=?", id).Updates(&m).Error -} - -func (r *RDSDBInstanceRepoImpl) Delete(id uint) error { - return r.db.Conn().Unscoped().Delete(&model.RDSDBInstance{}, id).Error -} - -func (r *RDSDBInstanceRepoImpl) List() ([]model.RDSDBInstance, error) { - var ms []model.RDSDBInstance - tx := r.db.Conn().Table(r.viewName).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *RDSDBInstanceRepoImpl) Truncate(tx *gorm.DB) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Unscoped().Where("1 = 1").Delete(&model.RDSDBInstance{}) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *RDSDBInstanceRepoImpl) ListByInstanceType(ctx context.Context, region, instanceType, engine, engineEdition, clusterType string) ([]model.RDSDBInstance, error) { - var ms []model.RDSDBInstance - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("region_code = ?", region). - Where("instance_type = ?", instanceType). 
- Where("database_engine = ?", engine). - Where("deployment_option = ?", clusterType) - if engineEdition != "" { - tx = tx.Where("database_edition = ?", engineEdition) - } - tx = tx.Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *RDSDBInstanceRepoImpl) GetCheapestByPref(ctx context.Context, pref map[string]any) (*model.RDSDBInstance, error) { - var m model.RDSDBInstance - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("price_per_unit != 0") - for k, v := range pref { - tx = tx.Where(k, v) - } - tx = tx.Order("price_per_unit ASC").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *RDSDBInstanceRepoImpl) UpdateNilEBSThroughput(tx *gorm.DB, tableName string) error { - if tx == nil { - tx = r.db.Conn() - } - tx.Raw(fmt.Sprintf(` - UPDATE %[1]s AS base - SET dedicated_ebs_throughput_bytes = ( - SELECT dedicated_ebs_throughput_bytes - FROM %[1]s AS derived - WHERE base.instance_type = derived.instance_type AND - derived.dedicated_ebs_throughput_bytes IS NOT NULL AND - derived.product_family = 'Database Instance' - LIMIT 1 - ), dedicated_ebs_throughput = ( - SELECT dedicated_ebs_throughput - FROM %[1]s AS derived - WHERE base.instance_type = derived.instance_type AND - derived.dedicated_ebs_throughput_bytes IS NOT NULL AND - derived.product_family = 'Database Instance' - LIMIT 1 - ) - WHERE dedicated_ebs_throughput_bytes IS NULL AND - base.product_family = 'Database Instance' - `, tableName)) - - return nil -} - -func (r *RDSDBInstanceRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var ec2InstanceTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - ec2InstanceTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, ec2InstanceTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(ec2InstanceTypeTable).AutoMigrate(&model.RDSDBInstance{}) - if err != nil { - return "", err - } - return ec2InstanceTypeTable, nil -} - -func (r *RDSDBInstanceRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *RDSDBInstanceRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *RDSDBInstanceRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := 
r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/rds_db_storage.go b/services/wastage/db/repo/rds_db_storage.go deleted file mode 100644 index f044020ff..000000000 --- a/services/wastage/db/repo/rds_db_storage.go +++ /dev/null @@ -1,605 +0,0 @@ -package repo - -import ( - "context" - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "go.uber.org/zap" - "gorm.io/gorm" - "math" - "strings" - "time" -) - -type RDSDBStorageRepo interface { - Create(tableName string, tx *gorm.DB, m *model.RDSDBStorage) error - Get(id uint) (*model.RDSDBStorage, error) - Update(id uint, m model.RDSDBStorage) error - Delete(id uint) error - List() ([]model.RDSDBStorage, error) - Truncate(tx *gorm.DB) error - GetCheapestBySpecs(ctx context.Context, region, engine, edition string, clusterType entity.AwsRdsClusterType, volumeSize, iops int32, throughput float64, validTypes []model.RDSDBStorageVolumeType) (*model.RDSDBStorage, int32, int32, float64, string, error) - MoveViewTransaction(tableName string) error - RemoveOldTables(tableName string) error - CreateNewTable() (string, error) -} - -type RDSDBStorageRepoImpl struct { - logger *zap.Logger - db *connector.Database - - viewName string -} - -func NewRDSDBStorageRepo(logger *zap.Logger, db *connector.Database) RDSDBStorageRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.RDSDBStorage{}) - - return &RDSDBStorageRepoImpl{ - logger: logger, - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r *RDSDBStorageRepoImpl) Create(tableName string, tx *gorm.DB, m *model.RDSDBStorage) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *RDSDBStorageRepoImpl) Get(id uint) (*model.RDSDBStorage, error) { - var m model.RDSDBStorage - tx := r.db.Conn().Table(r.viewName).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *RDSDBStorageRepoImpl) Update(id uint, m model.RDSDBStorage) error { - return r.db.Conn().Model(&model.RDSDBStorage{}).Where("id=?", id).Updates(&m).Error -} - -func (r *RDSDBStorageRepoImpl) Delete(id uint) error { - return r.db.Conn().Unscoped().Delete(&model.RDSDBStorage{}, id).Error -} - -func (r *RDSDBStorageRepoImpl) List() ([]model.RDSDBStorage, error) { - var ms []model.RDSDBStorage - tx := r.db.Conn().Table(r.viewName).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *RDSDBStorageRepoImpl) Truncate(tx *gorm.DB) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Unscoped().Where("1 = 1").Delete(&model.RDSDBStorage{}) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *RDSDBStorageRepoImpl) getMagneticTotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeMagnetic) { - return 0, "", errors.New("invalid volume type") - } - if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - *volumeSize = dbStorage.MinVolumeSizeGb - } - sizeCost 
:= dbStorage.PricePerUnit * float64(*volumeSize) - - millionIoPerMonth := math.Ceil(float64(*iops) * 30 * 24 * 60 * 60 / 1e6) // 30 days, 24 hours, 60 minutes, 60 seconds - iopsCost := 0.0 - - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "System Operation"). - Where("region_code = ?", dbStorage.RegionCode). - Where("volume_type = ?", "Magnetic"). - Where("'group' = ?", "RDS I/O Operation"). - Where("database_engine IN ?", []string{dbStorage.DatabaseEngine, "Any"}) - tx = tx.Order("price_per_unit asc") - var iopsStorage model.RDSDBStorage - err := tx.First(&iopsStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return 0, "", tx.Error - } - iopsCost = iopsStorage.PricePerUnit * millionIoPerMonth - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d\n + IOPS: $%.5f * %.2f (million IOPS per month)", dbStorage.PricePerUnit, *volumeSize, iopsStorage.PricePerUnit, millionIoPerMonth) - - return sizeCost + iopsCost, costBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) getGp2TotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeGP2) { - return 0, "", errors.New("invalid volume type") - } - if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - *volumeSize = dbStorage.MinVolumeSizeGb - } - - if *iops > 100 { - minReqSize := int32(math.Ceil(float64(*iops) / model.Gp2IopsPerGiB)) - *volumeSize = max(*volumeSize, minReqSize) - } - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", dbStorage.PricePerUnit, *volumeSize) - - return dbStorage.PricePerUnit * float64(*volumeSize), costBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) getGp3TotalPrice(ctx context.Context, clusterType entity.AwsRdsClusterType, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32, throughput *float64) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeGP3) { - return 0, "", errors.New("invalid volume type") - } - - getIopsStorage := func(provisionedIops int) (*model.RDSDBStorage, error) { - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "Provisioned IOPS"). - Where("region_code = ?", dbStorage.RegionCode). - Where("deployment_option = ?", dbStorage.DeploymentOption). - Where("group_description = ?", "RDS Provisioned GP3 IOPS"). - Where("database_engine = ?", dbStorage.DatabaseEngine) - if len(dbStorage.DatabaseEdition) > 0 { - tx = tx.Where("database_edition = ?", dbStorage.DatabaseEdition) - } - tx = tx.Order("price_per_unit asc") - var iopsStorage model.RDSDBStorage - err := tx.First(&iopsStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return nil, tx.Error - } - return &iopsStorage, nil - } - - getThroughputStorage := func(provisionedThroughput float64) (*model.RDSDBStorage, error) { - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "Provisioned Throughput"). - Where("region_code = ?", dbStorage.RegionCode). - Where("deployment_option = ?", dbStorage.DeploymentOption). 
- Where("database_engine = ?", dbStorage.DatabaseEngine) - if len(dbStorage.DatabaseEdition) > 0 { - tx = tx.Where("database_edition = ?", dbStorage.DatabaseEdition) - } - tx = tx.Order("price_per_unit asc") - var throughputStorage model.RDSDBStorage - err := tx.First(&throughputStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return nil, tx.Error - } - return &throughputStorage, nil - } - - dbEngine := strings.ToLower(dbStorage.DatabaseEngine) - switch { - case strings.Contains(dbEngine, "sql server"): - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", dbStorage.PricePerUnit, *volumeSize) - iopsCost := 0.0 - throughputCost := 0.0 - provisionedIops := int(*iops) - model.RDSDBStorageTier1Gp3BaseIops - provisionedIops = max(provisionedIops, 0) - if provisionedIops > 0 { - iopsStorage, err := getIopsStorage(provisionedIops) - if err != nil { - return 0, "", err - } - iopsCost = iopsStorage.PricePerUnit * float64(provisionedIops) - costBreakdown += fmt.Sprintf("\n + Provisioned IOPS (anything over %d for sql servers): $%.2f * %d", model.RDSDBStorageTier1Gp3BaseIops, iopsStorage.PricePerUnit, provisionedIops) - } else { - *iops = model.RDSDBStorageTier1Gp3BaseIops - } - - provisionedThroughput := *throughput - model.RDSDBStorageTier1Gp3BaseThroughput - provisionedThroughput = max(provisionedThroughput, 0) - if provisionedThroughput > 0 { - throughputStorage, err := getThroughputStorage(provisionedThroughput) - if err != nil { - return 0, "", err - } - throughputCost = throughputStorage.PricePerUnit * provisionedThroughput - costBreakdown += fmt.Sprintf("\n + Provisioned Throughput (anything over %.2f for sql servers): $%.2f * %.2f", model.RDSDBStorageTier1Gp3BaseThroughput, throughputStorage.PricePerUnit, provisionedThroughput) - } else { - *throughput = model.RDSDBStorageTier1Gp3BaseThroughput - } - return sizeCost + iopsCost + throughputCost, costBreakdown, nil - default: - tierThreshold := int32(model.RDSDBStorageTier1Gp3SizeThreshold) - if strings.Contains(dbEngine, "oracle") { - tierThreshold = model.RDSDBStorageTier1Gp3SizeThresholdForOracleEngine - } - var costBreakdown string - if *iops > model.RDSDBStorageTier1Gp3BaseIops || *throughput > model.RDSDBStorageTier1Gp3BaseThroughput { - costBreakdown = fmt.Sprintf("Scaling size to %d to meet IOPS or Throughput requirements", tierThreshold) - *volumeSize = max(*volumeSize, tierThreshold) - } else { - *iops = model.RDSDBStorageTier1Gp3BaseIops - *throughput = model.RDSDBStorageTier1Gp3BaseThroughput - } - if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - *volumeSize = dbStorage.MinVolumeSizeGb - } - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - iopsCost := 0.0 - throughputCost := 0.0 - - if *volumeSize > tierThreshold { - provisionedIops := int(*iops) - model.RDSDBStorageTier2Gp3BaseIops - provisionedIops = max(provisionedIops, 0) - if provisionedIops > 0 { - iopsStorage, err := getIopsStorage(provisionedIops) - if err != nil { - return 0, "", err - } - iopsCost = iopsStorage.PricePerUnit * float64(provisionedIops) - costBreakdown += fmt.Sprintf("\n + Provisioned IOPS (over %d): $%.2f * %d", model.RDSDBStorageTier2Gp3BaseIops, iopsStorage.PricePerUnit, provisionedIops) - } else { - *iops = model.RDSDBStorageTier2Gp3BaseIops - } - - provisionedThroughput := *throughput - model.RDSDBStorageTier2Gp3BaseThroughput - provisionedThroughput = max(provisionedThroughput, 0) - switch { - case clusterType == 
entity.AwsRdsClusterTypeMultiAzTwoInstance && strings.Contains(dbEngine, "postgres"): - *throughput = model.RDSDBStorageTier2Gp3BaseThroughput - case clusterType == entity.AwsRdsClusterTypeMultiAzTwoInstance && strings.Contains(dbEngine, "mysql"): - *throughput = model.RDSDBStorageTier2Gp3BaseThroughput - if *iops > model.RDSDBStorageIopsThresholdForThroughputScalingForMySqlEngine { - *throughput += math.Floor(float64(*iops-model.RDSDBStorageIopsThresholdForThroughputScalingForMySqlEngine) / model.RDSDBStorageThroughputScalingOnIopsFactorForMySqlEngine) - } - default: - if provisionedThroughput > 0 { - throughputStorage, err := getThroughputStorage(provisionedThroughput) - if err != nil { - return 0, "", err - } - throughputCost = throughputStorage.PricePerUnit * provisionedThroughput - costBreakdown += fmt.Sprintf("\n + Provisioned Throughput (over %.2f): $%.2f * %.2f", model.RDSDBStorageTier2Gp3BaseThroughput, throughputStorage.PricePerUnit, provisionedThroughput) - } else { - *throughput = model.RDSDBStorageTier2Gp3BaseThroughput - } - } - } // Else is not needed since tier 1 iops/throughput is not configurable and is not charged - - return sizeCost + iopsCost + throughputCost, costBreakdown, nil - } -} - -func (r *RDSDBStorageRepoImpl) getIo1TotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeIO1) { - return 0, "", errors.New("invalid volume type") - } - if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - *volumeSize = dbStorage.MinVolumeSizeGb - } - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - iopsCost := 0.0 - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "Provisioned IOPS"). - Where("region_code = ?", dbStorage.RegionCode). - Where("deployment_option = ?", dbStorage.DeploymentOption). - Where("group_description = ?", "RDS Provisioned IOPS"). - Where("database_engine = ?", dbStorage.DatabaseEngine) - if len(dbStorage.DatabaseEdition) > 0 { - tx = tx.Where("database_edition = ?", dbStorage.DatabaseEdition) - } - tx = tx.Order("price_per_unit asc") - var iopsStorage model.RDSDBStorage - err := tx.First(&iopsStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return 0, "", tx.Error - } - iopsCost = iopsStorage.PricePerUnit * float64(*iops) - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d\n + IOPS: $%.2f * %d", dbStorage.PricePerUnit, *volumeSize, iopsStorage.PricePerUnit, *iops) - - return sizeCost + iopsCost, costBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) getIo2TotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeIO2) { - return 0, "", errors.New("invalid volume type") - } - if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - *volumeSize = dbStorage.MinVolumeSizeGb - } - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - iopsCost := 0.0 - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "Provisioned IOPS"). - Where("region_code = ?", dbStorage.RegionCode). - Where("deployment_option = ?", dbStorage.DeploymentOption). - Where("group_description = ?", "RDS Provisioned IO2 IOPS"). 
- Where("database_engine = ?", dbStorage.DatabaseEngine) - if len(dbStorage.DatabaseEdition) > 0 { - tx = tx.Where("database_edition = ?", dbStorage.DatabaseEdition) - } - tx = tx.Order("price_per_unit asc") - var iopsStorage model.RDSDBStorage - err := tx.First(&iopsStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return 0, "", tx.Error - } - iopsCost = iopsStorage.PricePerUnit * float64(*iops) - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d\n + IOPS: $%.2f * %d", dbStorage.PricePerUnit, *volumeSize, iopsStorage.PricePerUnit, *iops) - - return sizeCost + iopsCost, costBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) getAuroraGeneralPurposeTotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32, iops *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeGeneralPurposeAurora) { - return 0, "", errors.New("invalid volume type") - } - // Disable min volume size check for aurora since use is not managing it - //if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - // *volumeSize = dbStorage.MinVolumeSizeGb - //} - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - - millionIoPerMonth := math.Ceil(float64(*iops) * 30 * 24 * 60 * 60 / 1e6) // 30 days, 24 hours, 60 minutes, 60 seconds - iopsCost := 0.0 - - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "System Operation"). - Where("region_code = ?", dbStorage.RegionCode). - Where("'group' = ?", "Aurora I/O Operation"). - Where("database_engine IN ?", []string{dbStorage.DatabaseEngine, "Any"}) - tx = tx.Order("price_per_unit asc") - var iopsStorage model.RDSDBStorage - err := tx.First(&iopsStorage).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return 0, "", tx.Error - } - iopsCost = iopsStorage.PricePerUnit * millionIoPerMonth - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d\n + IOPS: $%.5f * %.2f (million IOPS per month)", dbStorage.PricePerUnit, *volumeSize, iopsStorage.PricePerUnit, millionIoPerMonth) - - return sizeCost + iopsCost, costBreakdown, nil - -} - -func (r *RDSDBStorageRepoImpl) getAuroraIOOptimizedTotalPrice(ctx context.Context, dbStorage model.RDSDBStorage, volumeSize *int32) (float64, string, error) { - if dbStorage.VolumeType != string(model.RDSDBStorageVolumeTypeIOOptimizedAurora) { - return 0, "", errors.New("invalid volume type") - } - // Disable min volume size check for aurora since use is not managing it - //if dbStorage.MinVolumeSizeGb != 0 && *volumeSize < dbStorage.MinVolumeSizeGb { - // *volumeSize = dbStorage.MinVolumeSizeGb - //} - sizeCost := dbStorage.PricePerUnit * float64(*volumeSize) - - costBreakdown := fmt.Sprintf("Size: $%.2f * %d", dbStorage.PricePerUnit, *volumeSize) - - return sizeCost, costBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) getFeasibleVolumeTypes(ctx context.Context, region string, engine, edition string, clusterType entity.AwsRdsClusterType, volumeSize int32, iops int32, throughput float64, validTypes []model.RDSDBStorageVolumeType) ([]model.RDSDBStorage, error) { - var res []model.RDSDBStorage - tx := r.db.Conn().Table(r.viewName).WithContext(ctx). - Where("product_family = ?", "Database Storage"). - Where("region_code = ?", region). - Where("max_volume_size_gb >= ? or max_volume_size = ''", volumeSize). - Where("max_iops >= ?", iops). 
- Where("max_throughput_mb >= ?", throughput) - - if strings.Contains(strings.ToLower(engine), "aurora") { - var filteredValidTypes []model.RDSDBStorageVolumeType - for _, t := range validTypes { - if t == model.RDSDBStorageVolumeTypeIOOptimizedAurora || - t == model.RDSDBStorageVolumeTypeGeneralPurposeAurora { - filteredValidTypes = append(filteredValidTypes, t) - } - } - if len(filteredValidTypes) == 0 { - filteredValidTypes = []model.RDSDBStorageVolumeType{ - model.RDSDBStorageVolumeTypeIOOptimizedAurora, - model.RDSDBStorageVolumeTypeGeneralPurposeAurora, - } - } - validTypes = filteredValidTypes - tx = tx.Where("database_engine IN ?", []string{engine, "Any"}) - tx = tx.Where("deployment_option = ?", "Single-AZ") - } else { - var filteredValidTypes []model.RDSDBStorageVolumeType - for _, t := range validTypes { - if t != model.RDSDBStorageVolumeTypeIOOptimizedAurora && - t != model.RDSDBStorageVolumeTypeGeneralPurposeAurora { - filteredValidTypes = append(filteredValidTypes, t) - } - } - validTypes = filteredValidTypes - tx = tx.Where("database_engine = ?", engine) - if len(edition) > 0 { - tx = tx.Where("database_edition = ?", edition) - } - tx = tx.Where("deployment_option = ?", string(clusterType)) - } - - if len(validTypes) > 0 { - tx = tx.Where("volume_type IN ?", validTypes) - } - - tx = tx.Find(&res) - if tx.Error != nil { - return nil, tx.Error - } - - return res, nil -} - -func (r *RDSDBStorageRepoImpl) GetCheapestBySpecs(ctx context.Context, region, engine, edition string, clusterType entity.AwsRdsClusterType, volumeSize, iops int32, throughput float64, validTypes []model.RDSDBStorageVolumeType) (res *model.RDSDBStorage, cheapestVSize, cheapestIops int32, cheapestThroughput float64, cheapestCostBreakdown string, err error) { - res = nil - err = nil - cheapestVSize = volumeSize - cheapestIops = iops - cheapestThroughput = throughput - cheapestCostBreakdown = "" - - volumes, err := r.getFeasibleVolumeTypes(ctx, region, engine, edition, clusterType, volumeSize, iops, throughput, validTypes) - if err != nil { - return nil, 0, 0, 0, "", err - } - - if len(volumes) == 0 { - return nil, 0, 0, 0, "", nil - } - - var cheapestPrice float64 - - for _, v := range volumes { - v := v - vSize := volumeSize - vIops := iops - vThroughput := throughput - vCostBreakdown := "" - var totalCost float64 - switch model.RDSDBStorageVolumeType(v.VolumeType) { - case model.RDSDBStorageVolumeTypeMagnetic: - totalCost, vCostBreakdown, err = r.getMagneticTotalPrice(ctx, v, &vSize, &vIops) - case model.RDSDBStorageVolumeTypeGP2: - totalCost, vCostBreakdown, err = r.getGp2TotalPrice(ctx, v, &vSize, &vIops) - case model.RDSDBStorageVolumeTypeGP3: - totalCost, vCostBreakdown, err = r.getGp3TotalPrice(ctx, clusterType, v, &vSize, &vIops, &vThroughput) - case model.RDSDBStorageVolumeTypeIO1: - totalCost, vCostBreakdown, err = r.getIo1TotalPrice(ctx, v, &vSize, &vIops) - case model.RDSDBStorageVolumeTypeIO2: - totalCost, vCostBreakdown, err = r.getIo2TotalPrice(ctx, v, &vSize, &vIops) - case model.RDSDBStorageVolumeTypeGeneralPurposeAurora: - totalCost, vCostBreakdown, err = r.getAuroraGeneralPurposeTotalPrice(ctx, v, &vSize, &vIops) - case model.RDSDBStorageVolumeTypeIOOptimizedAurora: - totalCost, vCostBreakdown, err = r.getAuroraIOOptimizedTotalPrice(ctx, v, &vSize) - } - - if err != nil { - r.logger.Error("failed to calculate total cost", zap.Error(err), zap.Any("volume", v)) - return nil, 0, 0, 0, "", err - } - - if res == nil || totalCost < cheapestPrice { - res = &v - cheapestVSize = vSize - cheapestIops 
= vIops - cheapestThroughput = vThroughput - cheapestCostBreakdown = vCostBreakdown - cheapestPrice = totalCost - } - } - - return res, cheapestVSize, cheapestIops, cheapestThroughput, cheapestCostBreakdown, nil -} - -func (r *RDSDBStorageRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var ec2InstanceTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - ec2InstanceTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, ec2InstanceTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(ec2InstanceTypeTable).AutoMigrate(&model.RDSDBStorage{}) - if err != nil { - return "", err - } - return ec2InstanceTypeTable, nil -} - -func (r *RDSDBStorageRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS rdsdb_storages") - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW rdsdb_storages AS - SELECT * - FROM %s; -`, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *RDSDBStorageRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE 'rdsdb_storages_%%' AND table_name <> '%s'; - `, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *RDSDBStorageRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/rds_product.go b/services/wastage/db/repo/rds_product.go deleted file mode 100644 index 79d67de58..000000000 --- a/services/wastage/db/repo/rds_product.go +++ /dev/null @@ -1,188 +0,0 @@ -package repo - -import ( - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/sony/sonyflake" - "gorm.io/gorm" - "time" -) - -type RDSProductRepo interface { - Create(tableName string, tx *gorm.DB, m *model.RDSProduct) error - Get(id uint) (*model.RDSProduct, error) - Update(id uint, m model.RDSProduct) error - Delete(id uint) error - List() ([]model.RDSProduct, error) - Truncate(tx *gorm.DB) error - MoveViewTransaction(tableName string) error - RemoveOldTables(tableName string) error - CreateNewTable() (string, error) -} - -type RDSProductRepoImpl struct { - db *connector.Database - - viewName string -} - -func NewRDSProductRepo(db *connector.Database) RDSProductRepo { - stmt := &gorm.Statement{DB: db.Conn()} - stmt.Parse(&model.RDSProduct{}) - - return &RDSProductRepoImpl{ - db: db, - - viewName: stmt.Schema.Table, - } -} - -func (r 
*RDSProductRepoImpl) Create(tableName string, tx *gorm.DB, m *model.RDSProduct) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Table(tableName) - return tx.Create(&m).Error -} - -func (r *RDSProductRepoImpl) Get(id uint) (*model.RDSProduct, error) { - var m model.RDSProduct - tx := r.db.Conn().Table(r.viewName).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *RDSProductRepoImpl) Update(id uint, m model.RDSProduct) error { - return r.db.Conn().Model(&model.RDSProduct{}).Where("id=?", id).Updates(&m).Error -} - -func (r *RDSProductRepoImpl) Delete(id uint) error { - return r.db.Conn().Unscoped().Delete(&model.RDSProduct{}, id).Error -} - -func (r *RDSProductRepoImpl) List() ([]model.RDSProduct, error) { - var ms []model.RDSProduct - tx := r.db.Conn().Table(r.viewName).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *RDSProductRepoImpl) Truncate(tx *gorm.DB) error { - if tx == nil { - tx = r.db.Conn() - } - tx = tx.Unscoped().Where("1 = 1").Delete(&model.RDSProduct{}) - if tx.Error != nil { - return tx.Error - } - return nil -} - -func (r *RDSProductRepoImpl) CreateNewTable() (string, error) { - sf := sonyflake.NewSonyflake(sonyflake.Settings{}) - var ec2InstanceTypeTable string - for { - id, err := sf.NextID() - if err != nil { - return "", err - } - - ec2InstanceTypeTable = fmt.Sprintf("%s_%s_%d", - r.viewName, - time.Now().Format("2006_01_02"), - id, - ) - var c int32 - tx := r.db.Conn().Raw(fmt.Sprintf(` - SELECT count(*) - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name = '%s'; - `, ec2InstanceTypeTable)).First(&c) - if tx.Error != nil { - return "", err - } - if c == 0 { - break - } - } - - err := r.db.Conn().Table(ec2InstanceTypeTable).AutoMigrate(&model.RDSProduct{}) - if err != nil { - return "", err - } - return ec2InstanceTypeTable, nil -} - -func (r *RDSProductRepoImpl) MoveViewTransaction(tableName string) error { - tx := r.db.Conn().Begin() - var err error - defer func() { - _ = tx.Rollback() - }() - - dropViewQuery := fmt.Sprintf("DROP VIEW IF EXISTS %s", r.viewName) - tx = tx.Exec(dropViewQuery) - err = tx.Error - if err != nil { - return err - } - - createViewQuery := fmt.Sprintf(` - CREATE OR REPLACE VIEW %s AS - SELECT * - FROM %s; -`, r.viewName, tableName) - - tx = tx.Exec(createViewQuery) - err = tx.Error - if err != nil { - return err - } - - tx = tx.Commit() - err = tx.Error - if err != nil { - return err - } - return nil -} - -func (r *RDSProductRepoImpl) getOldTables(currentTableName string) ([]string, error) { - query := fmt.Sprintf(` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = current_schema - AND table_name LIKE '%s_%%' AND table_name <> '%s'; - `, r.viewName, currentTableName) - - var tableNames []string - tx := r.db.Conn().Raw(query).Find(&tableNames) - if tx.Error != nil { - return nil, tx.Error - } - return tableNames, nil -} - -func (r *RDSProductRepoImpl) RemoveOldTables(currentTableName string) error { - tableNames, err := r.getOldTables(currentTableName) - if err != nil { - return err - } - for _, tn := range tableNames { - err = r.db.Conn().Migrator().DropTable(tn) - if err != nil { - return err - } - } - return nil -} diff --git a/services/wastage/db/repo/usage.go b/services/wastage/db/repo/usage.go deleted file mode 100644 index d66c21134..000000000 --- a/services/wastage/db/repo/usage.go +++ 
/dev/null @@ -1,54 +0,0 @@ -package repo - -import ( - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type UsageRepo interface { - Create(m *model.Usage) error - Update(id uint, m model.Usage) error - List() ([]model.Usage, error) - GetRandomNotMoved() (*model.Usage, error) -} - -type UsageRepoImpl struct { - db *connector.Database -} - -func NewUsageRepo(db *connector.Database) UsageRepo { - return &UsageRepoImpl{ - db: db, - } -} - -func (r *UsageRepoImpl) Create(m *model.Usage) error { - return r.db.Conn().Create(&m).Error -} - -func (r *UsageRepoImpl) Update(id uint, m model.Usage) error { - return r.db.Conn().Model(&model.Usage{}).Where("id=?", id).Updates(&m).Error -} - -func (r *UsageRepoImpl) List() ([]model.Usage, error) { - var ms []model.Usage - tx := r.db.Conn().Model(&model.Usage{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *UsageRepoImpl) GetRandomNotMoved() (*model.Usage, error) { - var m model.Usage - tx := r.db.Conn().Model(&model.Usage{}).Where("moved=? OR moved IS NULL", false).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} diff --git a/services/wastage/db/repo/usage_v2.go b/services/wastage/db/repo/usage_v2.go deleted file mode 100644 index ecde23a4a..000000000 --- a/services/wastage/db/repo/usage_v2.go +++ /dev/null @@ -1,306 +0,0 @@ -package repo - -import ( - "context" - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type UsageV2Repo interface { - Create(m *model.UsageV2) error - Update(id uint, m model.UsageV2) error - GetRandomNullStatistics() (*model.UsageV2, error) - Get(id uint) (*model.UsageV2, error) - GetByAccountID(endpoint, accountId, auth0UserId, id string) ([]uint, error) - GetLastByAccountID(endpoint, accountId, auth0UserId, randomID, groupByType string) ([]uint, error) - GetCostZero() (*model.UsageV2, error) - GetRDSInstanceOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) - GetRDSInstanceOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) - GetRDSClusterOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) - GetRDSClusterOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) - GetEC2InstanceOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) - GetEC2InstanceOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) - GetAccountsForUser(ctx context.Context, userId string) ([]string, error) - GetAccountsForOrg(ctx context.Context, orgAddress string) ([]string, error) -} - -type UsageV2RepoImpl struct { - db *connector.Database -} - -func NewUsageV2Repo(db *connector.Database) UsageV2Repo { - return &UsageV2RepoImpl{ - db: db, - } -} - -func (r *UsageV2RepoImpl) Create(m *model.UsageV2) error { - return r.db.Conn().Create(&m).Error -} - -func (r *UsageV2RepoImpl) Update(id uint, m model.UsageV2) error { - return r.db.Conn().Model(&model.UsageV2{}).Where("id=?", id).Updates(&m).Error -} - -func (r *UsageV2RepoImpl) Get(id uint) (*model.UsageV2, error) { - var m model.UsageV2 - tx := r.db.Conn().Model(&model.UsageV2{}).Where("id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, 
gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *UsageV2RepoImpl) GetByAccountID(endpoint, accountId, auth0UserId, randomID string) ([]uint, error) { - tx := r.db.Conn().Raw(fmt.Sprintf(` -SELECT - id -FROM - usage_v2 -WHERE - api_endpoint like '%s%%' and - (statistics ->> 'auth0UserId') = '%s' and - (request -> 'identification' ->> 'random_id') = '%s' and - (statistics ->> 'accountID') = '%s' -`, endpoint, auth0UserId, randomID, accountId)) - rows, err := tx.Rows() - if err != nil { - return nil, err - } - defer rows.Close() - - var ids []uint - for rows.Next() { - var id uint - err = rows.Scan(&id) - if err != nil { - return nil, err - } - - ids = append(ids, id) - } - - return ids, nil -} - -func (r *UsageV2RepoImpl) GetLastByAccountID(endpoint, accountId, auth0UserId, randomID, groupByType string) ([]uint, error) { - tx := r.db.Conn().Raw(fmt.Sprintf(` -SELECT - max(id) -FROM - usage_v2 -WHERE - api_endpoint like '%s%%' and - (statistics ->> 'auth0UserId') = '%s' and - (request -> 'identification' ->> 'random_id') = '%s' and - (statistics ->> 'accountID') = '%s' -GROUP BY request -> '%s' ->> 'id' -`, endpoint, auth0UserId, randomID, accountId, groupByType)) - rows, err := tx.Rows() - if err != nil { - return nil, err - } - defer rows.Close() - - var ids []uint - for rows.Next() { - var id uint - err = rows.Scan(&id) - if err != nil { - return nil, err - } - - ids = append(ids, id) - } - - return ids, nil -} - -func (r *UsageV2RepoImpl) GetRandomNullStatistics() (*model.UsageV2, error) { - var m model.UsageV2 - tx := r.db.Conn().Model(&model.UsageV2{}).Where("statistics IS NULL").First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *UsageV2RepoImpl) GetCostZero() (*model.UsageV2, error) { - var m model.UsageV2 - tx := r.db.Conn().Model(&model.UsageV2{}).Where("api_endpoint = 'aws-rds'"). - Where("(response -> 'rightSizing' -> 'current' ->> 'cost')::float = 0 and (response -> 'rightSizing' -> 'recommended' ->> 'cost')::float = 0"). - Where("((response -> 'rightSizing' -> 'current' ->> 'computeCost')::float <> 0 or (response -> 'rightSizing' -> 'current' ->> 'storageCost')::float <> 0)"). - First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} - -func (r *UsageV2RepoImpl) GetRDSInstanceOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). - Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'aws-rds' - AND statistics ->> 'auth0UserId' = ? - AND request ->> 'loading' <> 'true' - `, userId). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -func (r *UsageV2RepoImpl) GetRDSInstanceOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). - Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'aws-rds' - AND statistics ->> 'orgEmail' LIKE ? - AND request ->> 'loading' <> 'true' - `, fmt.Sprintf("%%@%s", orgAddress)). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -func (r *UsageV2RepoImpl) GetRDSClusterOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). 
- Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'aws-rds-cluster' - AND statistics ->> 'auth0UserId' = ? - AND request ->> 'loading' <> 'true' - `, userId). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -func (r *UsageV2RepoImpl) GetRDSClusterOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). - Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'aws-rds-cluster' - AND statistics ->> 'orgEmail' LIKE ? - AND request ->> 'loading' <> 'true' - `, fmt.Sprintf("%%@%s", orgAddress)). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -func (r *UsageV2RepoImpl) GetEC2InstanceOptimizationsCountForUser(ctx context.Context, userId string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). - Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'ec2-instance' - AND statistics ->> 'auth0UserId' = ? - AND request ->> 'loading' <> 'true' - `, userId). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -func (r *UsageV2RepoImpl) GetEC2InstanceOptimizationsCountForOrg(ctx context.Context, orgAddress string) (int64, error) { - var count int64 - err := r.db.Conn().WithContext(ctx). - Raw(` - SELECT COUNT(*) - FROM usage_v2 - WHERE api_endpoint = 'ec2-instance' - AND statistics ->> 'orgEmail' LIKE ? - AND request ->> 'loading' <> 'true' - `, fmt.Sprintf("%%@%s", orgAddress)). - Scan(&count).Error - if err != nil { - return 0, err - } - return count, nil -} - -//func (r *UsageV2RepoImpl) GetEBSVolumeOptimizationsCountForUser(userId string) (int64, error) { -// var count int64 -// err := r.db.Conn().Model(&model.UsageV2{}). -// Select("SUM(statistics ->> 'ebsVolumeCount') as total_volumes"). -// Where("api_endpoint = 'ec2-instance'"). -// Where("statistics ->> 'auth0UserId' = ?", userId). -// Where("request ->> 'loading' <> 'true'"). -// Scan(&count).Error -// if err != nil { -// return 0, err -// } -// return count, nil -//} - -//func (r *UsageV2RepoImpl) GetEBSVolumeOptimizationsCountForOrg(orgAddress string) (int64, error) { -// var count int64 -// err := r.db.Conn().Model(&model.UsageV2{}). -// Select("SUM(statistics ->> 'ebsVolumeCount') as total_volumes"). -// Where("api_endpoint = 'ec2-instance'"). -// Where("statistics ->> 'orgEmail' LIKE ?", fmt.Sprintf("%%@%s", orgAddress)). -// Where("request ->> 'loading' <> 'true'"). -// Scan(&count).Error -// if err != nil { -// return 0, err -// } -// return count, nil -//} - -func (r *UsageV2RepoImpl) GetAccountsForUser(ctx context.Context, userId string) ([]string, error) { - var accounts []string - err := r.db.Conn().Model(&model.UsageV2{}).WithContext(ctx). - Select("distinct(statistics ->> 'accountID') as accounts"). - Where("statistics ->> 'auth0UserId' = ?", userId). - Where("request ->> 'loading' <> 'true'"). - Scan(&accounts).Error - if err != nil { - return nil, err - } - return accounts, nil -} - -func (r *UsageV2RepoImpl) GetAccountsForOrg(ctx context.Context, orgAddress string) ([]string, error) { - var accounts []string - err := r.db.Conn().Model(&model.UsageV2{}).WithContext(ctx). - Select("distinct(statistics ->> 'accountID') as accounts"). - Where("statistics ->> 'orgEmail' LIKE ?", fmt.Sprintf("%%@%s", orgAddress)). - Where("request ->> 'loading' <> 'true'"). 
- Scan(&accounts).Error - if err != nil { - return nil, err - } - return accounts, nil -} diff --git a/services/wastage/db/repo/user.go b/services/wastage/db/repo/user.go deleted file mode 100644 index 0c0724901..000000000 --- a/services/wastage/db/repo/user.go +++ /dev/null @@ -1,60 +0,0 @@ -package repo - -import ( - "context" - "errors" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "gorm.io/gorm" -) - -type UserRepo interface { - Create(m *model.User) error - Update(id string, m *model.User) error - Delete(id string) error - List() ([]model.User, error) - Get(ctx context.Context, id string) (*model.User, error) -} - -type UserRepoImpl struct { - db *connector.Database -} - -func NewUserRepo(db *connector.Database) UserRepo { - return &UserRepoImpl{ - db: db, - } -} - -func (r *UserRepoImpl) Create(m *model.User) error { - return r.db.Conn().Create(&m).Error -} - -func (r *UserRepoImpl) Update(id string, m *model.User) error { - return r.db.Conn().Model(&model.User{}).Where("user_id=?", id).Updates(&m).Error -} - -func (r *UserRepoImpl) Delete(id string) error { - return r.db.Conn().Model(&model.User{}).Where("user_id=?", id).Delete(&model.User{}).Error -} - -func (r *UserRepoImpl) List() ([]model.User, error) { - var ms []model.User - tx := r.db.Conn().Model(&model.User{}).Find(&ms) - if tx.Error != nil { - return nil, tx.Error - } - return ms, nil -} - -func (r *UserRepoImpl) Get(ctx context.Context, id string) (*model.User, error) { - var m model.User - tx := r.db.Conn().WithContext(ctx).Model(&model.User{}).Where("user_id=?", id).First(&m) - if tx.Error != nil { - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return nil, nil - } - return nil, tx.Error - } - return &m, nil -} diff --git a/services/wastage/ingestion/gcp.go b/services/wastage/ingestion/gcp.go deleted file mode 100644 index 5f61ceb9e..000000000 --- a/services/wastage/ingestion/gcp.go +++ /dev/null @@ -1,411 +0,0 @@ -package ingestion - -import ( - "context" - "encoding/json" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "go.uber.org/zap" - "google.golang.org/api/cloudbilling/v1" - "google.golang.org/api/compute/v1" - "google.golang.org/api/option" - "gorm.io/gorm" - "strings" - "time" -) - -var ( - services = map[string]string{ - "ComputeEngine": "6F81-5844-456A", - } -) - -const ( - ram = "RAM" - cpu = "CPU" -) - -type GcpService struct { - logger *zap.Logger - - apiService *cloudbilling.APIService - compute *compute.Service - project string - - DataAgeRepo repo.DataAgeRepo - - db *connector.Database - computeMachineTypeRepo repo.GCPComputeMachineTypeRepo - computeDiskTypeRepo repo.GCPComputeDiskTypeRepo - computeSKURepo repo.GCPComputeSKURepo -} - -func NewGcpService(ctx context.Context, logger *zap.Logger, dataAgeRepo repo.DataAgeRepo, computeMachineTypeRepo repo.GCPComputeMachineTypeRepo, - computeStorageTypeRepo repo.GCPComputeDiskTypeRepo, computeSKURepo repo.GCPComputeSKURepo, db *connector.Database, gcpCredentials map[string]string, projectId string) (*GcpService, error) { - configJson, err := json.Marshal(gcpCredentials) - if err != nil { - return nil, err - } - gcpOpts := []option.ClientOption{ - option.WithCredentialsJSON(configJson), - } - apiService, err := cloudbilling.NewService(ctx, gcpOpts...) 
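The constructor being deleted here builds its Cloud Billing and Compute clients from a single JSON-encoded credentials map. For reference, a minimal standalone sketch of that wiring plus the SKU paging it feeds; the credentials content is a placeholder, and the service ID is the Compute Engine entry from the services map above. The hunk resumes with the constructor's error check below.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/api/cloudbilling/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Placeholder credentials map; the real service receives this from configuration.
	creds := map[string]string{"type": "service_account"}
	configJson, err := json.Marshal(creds)
	if err != nil {
		log.Fatal(err)
	}
	apiService, err := cloudbilling.NewService(ctx, option.WithCredentialsJSON(configJson))
	if err != nil {
		log.Fatal(err)
	}
	// Page through the Compute Engine SKU catalog, the same way the deleted
	// fetchSKUs helper further down does.
	err = cloudbilling.NewServicesSkusService(apiService).
		List("services/6F81-5844-456A").
		Pages(ctx, func(page *cloudbilling.ListSkusResponse) error {
			fmt.Printf("got %d SKUs\n", len(page.Skus))
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}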
- if err != nil { - return nil, err - } - - compute, err := compute.NewService(ctx, gcpOpts...) - if err != nil { - return nil, err - } - - return &GcpService{ - logger: logger, - DataAgeRepo: dataAgeRepo, - db: db, - apiService: apiService, - compute: compute, - computeSKURepo: computeSKURepo, - computeDiskTypeRepo: computeStorageTypeRepo, - computeMachineTypeRepo: computeMachineTypeRepo, - project: projectId, - }, nil -} - -func (s *GcpService) Start(ctx context.Context) { - s.logger.Info("GCP Ingestion service started") - defer func() { - if r := recover(); r != nil { - s.logger.Error("gcp ingestion paniced", zap.Error(fmt.Errorf("%v", r))) - time.Sleep(15 * time.Minute) - go s.Start(ctx) - } - }() - - ticker := time.NewTicker(2 * time.Minute) - defer ticker.Stop() - - for range ticker.C { - s.logger.Info("checking data age") - dataAge, err := s.DataAgeRepo.List() - if err != nil { - s.logger.Error("failed to list data age", zap.Error(err)) - continue - } - - var computeData *model.DataAge - for _, data := range dataAge { - data := data - switch data.DataType { - case "GCPComputeEngine": - computeData = &data - } - } - - if computeData == nil || computeData.UpdatedAt.Before(time.Now().Add(-365*24*time.Hour)) { - s.logger.Info("gcp compute engine ingest started") - err = s.IngestComputeInstance(ctx) - if err != nil { - s.logger.Error("failed to ingest gcp compute engine", zap.Error(err)) - continue - } - if computeData == nil { - err = s.DataAgeRepo.Create(&model.DataAge{ - DataType: "GCPComputeEngine", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to create data age", zap.Error(err)) - continue - } - } else { - err = s.DataAgeRepo.Update("GCPComputeEngine", model.DataAge{ - DataType: "GCPComputeEngine", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to update data age", zap.Error(err)) - continue - } - } - } else { - s.logger.Info("gcp compute engine ingest not started: ", zap.Any("usage", computeData)) - } - } -} - -func (s *GcpService) IngestComputeInstance(ctx context.Context) error { - computeMachineTypeTable, err := s.computeMachineTypeRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "compute_machine_type"), - zap.Error(err)) - return err - } - - computeDiskTable, err := s.computeDiskTypeRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "compute_disk_type"), - zap.Error(err)) - return err - } - - computeSKUTable, err := s.computeSKURepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "compute_sku"), - zap.Error(err)) - return err - } - - var transaction *gorm.DB - machineTypePrices := make(map[string]map[string]map[string]float64) - diskTypePrices := make(map[string]map[string]float64) - skus, err := s.fetchSKUs(ctx, services["ComputeEngine"]) - if err != nil { - return err - } - for _, sku := range skus { - if sku.PricingInfo == nil || len(sku.PricingInfo) == 0 || sku.PricingInfo[len(sku.PricingInfo)-1].PricingExpression == nil { - continue - } - if len(sku.PricingInfo[len(sku.PricingInfo)-1].PricingExpression.TieredRates) == 0 { - continue - } - - mf, rg, t, pm := model.GetSkuDetails(sku) - - if rg == cpu || rg == ram { - if _, ok := machineTypePrices[fmt.Sprintf("%s.%s", mf, rg)]; !ok { - skuMachineTypePrices := make(map[string]map[string]float64) - machineTypePrices[fmt.Sprintf("%s.%s", mf, rg)] = skuMachineTypePrices - } - } - if rg == "SSD" && 
strings.Contains(sku.Description, "Hyperdisk Throughput Capacity") { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["hyperdisk-throughput"] = skuStorageTypePrices - } - if rg == "SSD" && strings.Contains(sku.Description, "Hyperdisk Extreme Capacity") { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["hyperdisk-extreme"] = skuStorageTypePrices - } - if rg == "SSD" && strings.Contains(sku.Description, "Hyperdisk Balanced Capacity") { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["hyperdisk-balanced"] = skuStorageTypePrices - } - if sku.Description == "Storage PD Capacity" { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["pd-standard"] = skuStorageTypePrices - } - if sku.Description == "Balanced PD Capacity" { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["pd-balanced"] = skuStorageTypePrices - } - if sku.Description == "SSD backed PD Capacity" { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["pd-ssd"] = skuStorageTypePrices - } - if sku.Description == "Extreme PD Capacity" { - skuStorageTypePrices := make(map[string]float64) - diskTypePrices["pd-extreme"] = skuStorageTypePrices - } - for _, region := range sku.ServiceRegions { - computeSKU := &model.GCPComputeSKU{} - computeSKU.PopulateFromObject(sku, region) - - err = s.computeSKURepo.Create(computeSKUTable, transaction, computeSKU) - if err != nil { - return err - } - - if (rg == cpu || rg == ram) && mf != "" && t == "Predefined" { - if _, ok := machineTypePrices[fmt.Sprintf("%s.%s", mf, rg)][region]; !ok { - skuMachineTypePrices := make(map[string]float64) - machineTypePrices[fmt.Sprintf("%s.%s", mf, rg)][region] = skuMachineTypePrices - } - machineTypePrices[fmt.Sprintf("%s.%s", mf, rg)][region][pm] = computeSKU.UnitPrice - } - if computeSKU.Description == "Storage PD Capacity" { - diskTypePrices["pd-standard"][region] = computeSKU.UnitPrice - } - if computeSKU.Description == "Balanced PD Capacity" { - diskTypePrices["pd-balanced"][region] = computeSKU.UnitPrice - } - if computeSKU.Description == "SSD backed PD Capacity" { - diskTypePrices["pd-ssd"][region] = computeSKU.UnitPrice - } - if computeSKU.Description == "Extreme PD Capacity" { - diskTypePrices["pd-extreme"][region] = computeSKU.UnitPrice - } - } - } - - types, err := s.fetchMachineTypes(ctx) - if err != nil { - s.logger.Error("failed to fetch machine types", zap.Error(err)) - return err - } - s.logger.Info("fetched machine types", zap.Any("count", len(types))) - for _, mt := range types { - region := strings.Join([]string{strings.Split(mt.Zone, "-")[0], strings.Split(mt.Zone, "-")[1]}, "-") - onDemandCMType := &model.GCPComputeMachineType{} - onDemandCMType.PopulateFromObject(mt, region, false) - - spotCMType := &model.GCPComputeMachineType{} - spotCMType.PopulateFromObject(mt, region, true) - - mf := strings.ToLower(strings.Split(mt.Name, "-")[0]) - rp, ok := machineTypePrices[fmt.Sprintf("%s.%s", mf, ram)][region] - if !ok { - s.logger.Error("failed to get ram price", zap.String("machine_type", mt.Name), zap.String("family", fmt.Sprintf("%s.%s", mf, cpu)), - zap.String("region", region), zap.Any("prices", machineTypePrices[fmt.Sprintf("%s.%s", mf, ram)])) - continue - } - - cp, ok := machineTypePrices[fmt.Sprintf("%s.%s", mf, cpu)][region] - if !ok { - s.logger.Error("failed to get cpu price", zap.String("machine_type", mt.Name), zap.String("family", fmt.Sprintf("%s.%s", mf, cpu)), - zap.String("region", region), zap.Any("prices", 
machineTypePrices[fmt.Sprintf("%s.%s", mf, cpu)])) - continue - } - - onDemandCMType.UnitPrice = (rp["standard"] * float64(onDemandCMType.MemoryMb) / float64(1024)) + (cp["standard"] * float64(onDemandCMType.GuestCpus)) - - err = s.computeMachineTypeRepo.Create(computeMachineTypeTable, transaction, onDemandCMType) - if err != nil { - s.logger.Error("failed to create compute machine type", zap.Error(err)) - continue - } - - spotCMType.UnitPrice = (rp["preemptible"] * float64(spotCMType.MemoryMb) / float64(1024)) + (cp["preemptible"] * float64(spotCMType.GuestCpus)) - - err = s.computeMachineTypeRepo.Create(computeMachineTypeTable, transaction, spotCMType) - if err != nil { - s.logger.Error("failed to create compute machine type", zap.Error(err)) - continue - } - s.logger.Info("created compute machine type", zap.String("name", mt.Name)) - } - - diskTypes, err := s.fetchDiskTypes(ctx) - if err != nil { - s.logger.Error("failed to fetch disk types", zap.Error(err)) - return err - } - s.logger.Info("fetched disk types", zap.Any("count", len(types))) - for _, mt := range diskTypes { - disk := &model.GCPComputeDiskType{} - disk.PopulateFromObject(mt) - - p, ok := diskTypePrices[mt.Name][disk.Region] - if !ok { - s.logger.Error("failed to get storage price", zap.String("storage_type", mt.Name), zap.String("region", disk.Region), - zap.Any("prices", diskTypePrices)) - continue - } - - disk.UnitPrice = p - - err = s.computeDiskTypeRepo.Create(computeDiskTable, transaction, disk) - if err != nil { - s.logger.Error("failed to create compute storage type", zap.Error(err)) - continue - } - s.logger.Info("created compute storage type", zap.String("name", mt.Name)) - } - - err = s.computeMachineTypeRepo.MoveViewTransaction(computeMachineTypeTable) - if err != nil { - return err - } - - err = s.computeMachineTypeRepo.RemoveOldTables(computeMachineTypeTable) - if err != nil { - return err - } - - err = s.computeDiskTypeRepo.MoveViewTransaction(computeDiskTable) - if err != nil { - return err - } - - err = s.computeDiskTypeRepo.RemoveOldTables(computeDiskTable) - if err != nil { - return err - } - - err = s.computeSKURepo.MoveViewTransaction(computeSKUTable) - if err != nil { - return err - } - - err = s.computeSKURepo.RemoveOldTables(computeSKUTable) - if err != nil { - return err - } - - return nil -} - -func (s *GcpService) fetchSKUs(ctx context.Context, service string) ([]*cloudbilling.Sku, error) { - var results []*cloudbilling.Sku - - err := cloudbilling.NewServicesSkusService(s.apiService).List(fmt.Sprintf("services/%s", service)).Pages(ctx, func(l *cloudbilling.ListSkusResponse) error { - for _, sku := range l.Skus { - results = append(results, sku) - } - return nil - }) - if err != nil { - return nil, err - } - - return results, nil -} - -func (s *GcpService) fetchMachineTypes(ctx context.Context) ([]*compute.MachineType, error) { - var results []*compute.MachineType - - zones, err := s.compute.Zones.List(s.project).Do() - if err != nil { - return nil, err - } - for _, zone := range zones.Items { - err = s.compute.MachineTypes.List(s.project, zone.Name).Pages(ctx, func(l *compute.MachineTypeList) error { - for _, mt := range l.Items { - results = append(results, mt) - } - return nil - }) - if err != nil { - return nil, err - } - } - - return results, nil -} - -func (s *GcpService) fetchDiskTypes(ctx context.Context) ([]*compute.DiskType, error) { - var results []*compute.DiskType - - zones, err := s.compute.Zones.List(s.project).Do() - if err != nil { - return nil, err - } - for _, zone := range 
zones.Items { - err = s.compute.DiskTypes.List(s.project, zone.Name).Pages(ctx, func(l *compute.DiskTypeList) error { - for _, dt := range l.Items { - results = append(results, dt) - } - return nil - }) - if err != nil { - return nil, err - } - } - - return results, nil -} diff --git a/services/wastage/ingestion/service.go b/services/wastage/ingestion/service.go deleted file mode 100644 index cac26ada6..000000000 --- a/services/wastage/ingestion/service.go +++ /dev/null @@ -1,546 +0,0 @@ -package ingestion - -import ( - "context" - "encoding/csv" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/opengovern/opengovernance/services/wastage/db/connector" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "go.uber.org/zap" - "gorm.io/gorm" - "io" - "net/http" - "strings" - "time" -) - -type Service struct { - logger *zap.Logger - - DataAgeRepo repo.DataAgeRepo - - db *connector.Database - ec2InstanceRepo repo.EC2InstanceTypeRepo - rdsRepo repo.RDSProductRepo - rdsInstanceRepo repo.RDSDBInstanceRepo - ebsVolumeTypeRepo repo.EBSVolumeTypeRepo - storageRepo repo.RDSDBStorageRepo -} - -func New(logger *zap.Logger, db *connector.Database, ec2InstanceRepo repo.EC2InstanceTypeRepo, rdsRepo repo.RDSProductRepo, rdsInstanceRepo repo.RDSDBInstanceRepo, storageRepo repo.RDSDBStorageRepo, ebsVolumeRepo repo.EBSVolumeTypeRepo, dataAgeRepo repo.DataAgeRepo) *Service { - return &Service{ - logger: logger, - db: db, - ec2InstanceRepo: ec2InstanceRepo, - rdsInstanceRepo: rdsInstanceRepo, - rdsRepo: rdsRepo, - storageRepo: storageRepo, - ebsVolumeTypeRepo: ebsVolumeRepo, - DataAgeRepo: dataAgeRepo, - } -} - -func (s *Service) Start(ctx context.Context) { - s.logger.Info("Ingestion service started") - defer func() { - if r := recover(); r != nil { - s.logger.Error("ingestion paniced", zap.Error(fmt.Errorf("%v", r))) - time.Sleep(15 * time.Minute) - go s.Start(ctx) - } - }() - - ticker := time.NewTicker(2 * time.Minute) - defer ticker.Stop() - - for range ticker.C { - s.logger.Info("checking data age") - dataAge, err := s.DataAgeRepo.List() - if err != nil { - s.logger.Error("failed to list data age", zap.Error(err)) - continue - } - - var ec2InstanceData *model.DataAge - var rdsData *model.DataAge - for _, data := range dataAge { - data := data - switch data.DataType { - case "AWS::EC2::Instance": - ec2InstanceData = &data - case "AWS::RDS::Instance": - rdsData = &data - } - } - - if ec2InstanceData == nil || ec2InstanceData.UpdatedAt.Before(time.Now().Add(-365*24*time.Hour)) { - s.logger.Info("ec2 instance ingest started") - err = s.IngestEc2Instances(ctx) - if err != nil { - s.logger.Error("failed to ingest ec2 instances", zap.Error(err)) - continue - } - if ec2InstanceData == nil { - err = s.DataAgeRepo.Create(&model.DataAge{ - DataType: "AWS::EC2::Instance", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to create data age", zap.Error(err)) - continue - } - } else { - err = s.DataAgeRepo.Update("AWS::EC2::Instance", model.DataAge{ - DataType: "AWS::EC2::Instance", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to update data age", zap.Error(err)) - continue - } - } - } else { - s.logger.Info("ec2 instance ingest not started: ", zap.Any("usage", ec2InstanceData)) - } - - if rdsData == nil || 
rdsData.UpdatedAt.Before(time.Now().Add(-7*24*time.Hour)) { - s.logger.Info("rds ingest started") - err = s.IngestRDS() - if err != nil { - s.logger.Error("failed to ingest rds", zap.Error(err)) - continue - } - if rdsData == nil { - err = s.DataAgeRepo.Create(&model.DataAge{ - DataType: "AWS::RDS::Instance", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to create rds data age", zap.Error(err)) - continue - } - } else { - err = s.DataAgeRepo.Update("AWS::RDS::Instance", model.DataAge{ - DataType: "AWS::RDS::Instance", - UpdatedAt: time.Now(), - }) - if err != nil { - s.logger.Error("failed to update rds data age", zap.Error(err)) - continue - } - } - } else { - s.logger.Info("rds ingest not started: ", zap.Any("usage", rdsData)) - } - } - - s.logger.Error("Ingestion service stopped", zap.Time("time", time.Now())) -} - -func (s *Service) IngestEc2Instances(ctx context.Context) error { - //transaction := s.db.Conn().Begin() - //defer func() { - // transaction.Rollback() - //}() - ec2InstanceTypeTable, err := s.ec2InstanceRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "ec2_instance_type"), - zap.Error(err)) - return err - } - - ebsVolumeTypeTable, err := s.ebsVolumeTypeRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "ebs_volume_type"), - zap.Error(err)) - return err - } - err = s.ingestEc2InstancesBase(ctx, ec2InstanceTypeTable, ebsVolumeTypeTable, nil) - if err != nil { - s.logger.Error("failed to ingest ec2 instances", zap.Error(err)) - return err - } - - err = s.ingestEc2InstancesExtra(ctx, ec2InstanceTypeTable, nil) - if err != nil { - s.logger.Error("failed to ingest ec2 instances extra", zap.Error(err)) - return err - } - - //err = transaction.Commit().Error - //if err != nil { - // s.logger.Error("failed to commit transaction", zap.Error(err)) - // return err - //} - - s.logger.Info("ingested ec2 instances") - - return nil -} - -func (s *Service) ingestEc2InstancesBase(ctx context.Context, ec2InstanceTypeTable, ebsVolumeTypeTable string, transaction *gorm.DB) error { - url := "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.csv" - resp, err := http.Get(url) - if err != nil { - return err - } - csvr := csv.NewReader(resp.Body) - csvr.FieldsPerRecord = -1 - - var columns map[string]int - for { - values, err := csvr.Read() - if err != nil { - return err - } - - if len(values) > 2 { - columns = readColumnPositions(values) - break - } - } - - // Read through each row in the CSV file and send a price.WithProduct on the results channel. 
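Two conventions in the row loop that follows are easy to miss: FieldsPerRecord = -1 lets the reader tolerate the ragged preamble rows of the AWS pricing CSV, and the first row with more than two fields is treated as the header and turned into a name-to-index map by readColumnPositions (defined at the end of this file). A self-contained sketch of both, with inline sample data standing in for the pricing feed:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strings"
)

func main() {
	data := "AWS pricing preamble,1\n" +
		"SKU,TermType,Product Family\n" +
		"ABC123,OnDemand,Compute Instance\n"
	csvr := csv.NewReader(strings.NewReader(data))
	csvr.FieldsPerRecord = -1 // tolerate ragged preamble rows

	// Find the header: the first row with more than two fields, exactly as
	// the deleted loop does, then map column names to their positions.
	var columns map[string]int
	for {
		values, err := csvr.Read()
		if err != nil {
			panic(err)
		}
		if len(values) > 2 {
			columns = make(map[string]int)
			for i, v := range values {
				columns[v] = i
			}
			break
		}
	}

	// Stream the remaining rows and dispatch on Product Family by name.
	for {
		row, err := csvr.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(row[columns["Product Family"]], row[columns["TermType"]])
	}
}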
- for { - row, err := csvr.Read() - if err != nil { - if err != io.EOF { - return err - } - break - } - - switch row[columns["Product Family"]] { - case "Compute Instance", "Compute Instance (bare metal)": - v := model.EC2InstanceType{} - v.PopulateFromMap(columns, row) - - if strings.ToLower(v.PhysicalProcessor) == "variable" { - continue - } - if v.InstanceType == "" { - continue - } - if v.TermType != "OnDemand" { - continue - } - - fmt.Println("Instance", v) - err = s.ec2InstanceRepo.Create(ec2InstanceTypeTable, transaction, &v) - if err != nil { - return err - } - case "Storage", "System Operation", "Provisioned Throughput": - v := model.EBSVolumeType{} - v.PopulateFromMap(columns, row) - - if v.VolumeType == "" { - continue - } - if v.TermType != "OnDemand" { - continue - } - fmt.Println("Volume", v) - err = s.ebsVolumeTypeRepo.Create(ebsVolumeTypeTable, transaction, &v) - if err != nil { - return err - } - } - } - - err = s.ec2InstanceRepo.MoveViewTransaction(ec2InstanceTypeTable) - if err != nil { - return err - } - - err = s.ebsVolumeTypeRepo.MoveViewTransaction(ebsVolumeTypeTable) - if err != nil { - return err - } - - err = s.ec2InstanceRepo.RemoveOldTables(ec2InstanceTypeTable) - if err != nil { - return err - } - - err = s.ebsVolumeTypeRepo.RemoveOldTables(ebsVolumeTypeTable) - if err != nil { - return err - } - - return nil -} - -func (s *Service) ingestEc2InstancesExtra(ctx context.Context, ec2InstanceTypeTable string, transaction *gorm.DB) error { - sdkConfig, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1")) - if err != nil { - s.logger.Error("failed to load SDK config", zap.Error(err)) - return err - } - baseEc2Client := ec2.NewFromConfig(sdkConfig) - - regions, err := baseEc2Client.DescribeRegions(ctx, &ec2.DescribeRegionsInput{AllRegions: aws.Bool(false)}) - if err != nil { - s.logger.Error("failed to describe regions", zap.Error(err)) - return err - } - - for _, region := range regions.Regions { - cnf, err := config.LoadDefaultConfig(ctx, config.WithRegion(*region.RegionName)) - if err != nil { - s.logger.Error("failed to load SDK config", zap.Error(err), zap.String("region", *region.RegionName)) - return err - } - ec2Client := ec2.NewFromConfig(cnf) - paginator := ec2.NewDescribeInstanceTypesPaginator(ec2Client, &ec2.DescribeInstanceTypesInput{}) - for paginator.HasMorePages() { - output, err := paginator.NextPage(ctx) - if err != nil { - s.logger.Error("failed to get next page", zap.Error(err), zap.String("region", *region.RegionName)) - return err - } - for _, instanceType := range output.InstanceTypes { - extras := getEc2InstanceExtrasMap(instanceType) - if len(extras) == 0 { - s.logger.Warn("no extras found", zap.String("region", *region.RegionName), zap.String("instanceType", string(instanceType.InstanceType))) - continue - } - s.logger.Info("updating extras", zap.String("region", *region.RegionName), zap.String("instanceType", string(instanceType.InstanceType)), zap.Any("extras", extras)) - err = s.ec2InstanceRepo.UpdateExtrasByRegionAndType(ec2InstanceTypeTable, transaction, *region.RegionName, string(instanceType.InstanceType), extras) - if err != nil { - s.logger.Error("failed to update extras", zap.Error(err), zap.String("region", *region.RegionName), zap.String("instanceType", string(instanceType.InstanceType))) - return err - } - } - } - } - - // Populate the still missing extras with the us-east-1 region data - paginator := ec2.NewDescribeInstanceTypesPaginator(baseEc2Client, &ec2.DescribeInstanceTypesInput{}) - for 
paginator.HasMorePages() { - output, err := paginator.NextPage(ctx) - if err != nil { - s.logger.Error("failed to get next page", zap.Error(err), zap.String("region", "all")) - return err - } - for _, instanceType := range output.InstanceTypes { - extras := getEc2InstanceExtrasMap(instanceType) - if len(extras) == 0 { - s.logger.Warn("no extras found", zap.String("region", "all"), zap.String("instanceType", string(instanceType.InstanceType))) - continue - } - s.logger.Info("updating extras", zap.String("region", "all"), zap.String("instanceType", string(instanceType.InstanceType)), zap.Any("extras", extras)) - err = s.ec2InstanceRepo.UpdateNullExtrasByType(ec2InstanceTypeTable, transaction, string(instanceType.InstanceType), extras) - if err != nil { - s.logger.Error("failed to update extras", zap.Error(err), zap.String("region", "all"), zap.String("instanceType", string(instanceType.InstanceType))) - return err - } - } - } - - return nil -} - -func (s *Service) IngestRDS() error { - rdsInstancesTable, err := s.rdsInstanceRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "rdsdb_instances"), - zap.Error(err)) - return err - } - rdsStorageTable, err := s.storageRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "rdsdb_storages"), - zap.Error(err)) - return err - } - rdsProductsTable, err := s.rdsRepo.CreateNewTable() - if err != nil { - s.logger.Error("failed to auto migrate", - zap.String("table", "rds_products"), - zap.Error(err)) - return err - } - - url := "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonRDS/current/index.csv" - resp, err := http.Get(url) - if err != nil { - return err - } - csvr := csv.NewReader(resp.Body) - csvr.FieldsPerRecord = -1 - - var columns map[string]int - for { - values, err := csvr.Read() - if err != nil { - return err - } - - if len(values) > 2 { - columns = readColumnPositions(values) - break - } - } - // - //transaction := s.db.Conn().Begin() - //defer func() { - // transaction.Rollback() - //}() - - var transaction *gorm.DB - - // Read through each row in the CSV file and send a price.WithProduct on the results channel. 
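Like the EC2 path above, IngestRDS stages rows into freshly created tables and only afterwards calls MoveViewTransaction and RemoveOldTables. Those repo methods are defined outside this patch, so the following is only a plausible sketch of the pattern they name, assuming PostgreSQL and an illustrative table layout: ingest into a timestamped table, then repoint the stable view readers use in a single DDL statement. The deleted row-processing loop continues below.

package sketch

import (
	"database/sql"
	"fmt"
	"time"
)

// swapIn sketches the lifecycle implied by CreateNewTable /
// MoveViewTransaction / RemoveOldTables. Readers always query the view
// named base, so the final swap is atomic from their point of view.
func swapIn(db *sql.DB, base string, fill func(table string) error) error {
	table := fmt.Sprintf("%s_%d", base, time.Now().Unix())
	if _, err := db.Exec(fmt.Sprintf(
		"CREATE TABLE %s (id BIGSERIAL PRIMARY KEY, payload JSONB)", table)); err != nil {
		return err
	}
	if err := fill(table); err != nil {
		return err
	}
	// Repoint the view; superseded generations of the table can then be
	// dropped, which is what RemoveOldTables would do.
	_, err := db.Exec(fmt.Sprintf(
		"CREATE OR REPLACE VIEW %s AS SELECT * FROM %s", base, table))
	return err
}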
- for { - row, err := csvr.Read() - if err != nil { - if err != io.EOF { - return err - } - break - } - - switch row[columns["Product Family"]] { - case "Database Storage", "Provisioned IOPS", "Provisioned Throughput", "System Operation": - v := model.RDSDBStorage{} - v.PopulateFromMap(columns, row) - - if !v.DoIngest() { - continue - } - - fmt.Println("RDSDBStorage", v) - - err = s.storageRepo.Create(rdsStorageTable, transaction, &v) - if err != nil { - return err - } - - case "Database Instance": - v := model.RDSDBInstance{} - v.PopulateFromMap(columns, row) - - if v.TermType != "OnDemand" { - continue - } - if v.LocationType == "AWS Outposts" { - continue - } - - fmt.Println("RDSDBInstance", v) - - err = s.rdsInstanceRepo.Create(rdsInstancesTable, transaction, &v) - if err != nil { - return err - } - - default: - v := model.RDSProduct{} - v.PopulateFromMap(columns, row) - - if v.TermType != "OnDemand" { - continue - } - if v.LocationType == "AWS Outposts" { - continue - } - - fmt.Println("RDS", v) - - err = s.rdsRepo.Create(rdsProductsTable, transaction, &v) - if err != nil { - return err - } - } - } - - err = s.rdsInstanceRepo.UpdateNilEBSThroughput(transaction, rdsInstancesTable) - if err != nil { - s.logger.Error("failed to update nil ebs throughput", zap.Error(err)) - } - - err = s.rdsInstanceRepo.MoveViewTransaction(rdsInstancesTable) - if err != nil { - s.logger.Error("failed to move view", zap.String("table", rdsInstancesTable), zap.Error(err)) - return err - } - - err = s.rdsRepo.MoveViewTransaction(rdsProductsTable) - if err != nil { - s.logger.Error("failed to move view", zap.String("table", rdsProductsTable), zap.Error(err)) - return err - } - - err = s.storageRepo.MoveViewTransaction(rdsStorageTable) - if err != nil { - s.logger.Error("failed to move view", zap.String("table", rdsStorageTable), zap.Error(err)) - return err - } - - err = s.rdsInstanceRepo.RemoveOldTables(rdsInstancesTable) - if err != nil { - s.logger.Error("failed to remove old tables", zap.String("table", rdsInstancesTable), zap.Error(err)) - return err - } - - err = s.rdsRepo.RemoveOldTables(rdsProductsTable) - if err != nil { - s.logger.Error("failed to remove old tables", zap.String("table", rdsProductsTable), zap.Error(err)) - return err - } - - err = s.storageRepo.RemoveOldTables(rdsStorageTable) - if err != nil { - s.logger.Error("failed to remove old tables", zap.String("table", rdsStorageTable), zap.Error(err)) - return err - } - - //err = transaction.Commit().Error - //if err != nil { - // return err - //} - return nil -} - -func getEc2InstanceExtrasMap(instanceType ec2types.InstanceTypeInfo) map[string]any { - extras := map[string]any{} - if instanceType.EbsInfo != nil && instanceType.EbsInfo.EbsOptimizedInfo != nil { - if instanceType.EbsInfo.EbsOptimizedInfo.BaselineBandwidthInMbps != nil { - extras["ebs_baseline_bandwidth"] = *instanceType.EbsInfo.EbsOptimizedInfo.BaselineBandwidthInMbps - } - if instanceType.EbsInfo.EbsOptimizedInfo.MaximumBandwidthInMbps != nil { - extras["ebs_maximum_bandwidth"] = *instanceType.EbsInfo.EbsOptimizedInfo.MaximumBandwidthInMbps - } - if instanceType.EbsInfo.EbsOptimizedInfo.BaselineIops != nil { - extras["ebs_baseline_iops"] = *instanceType.EbsInfo.EbsOptimizedInfo.BaselineIops - } - if instanceType.EbsInfo.EbsOptimizedInfo.MaximumIops != nil { - extras["ebs_maximum_iops"] = *instanceType.EbsInfo.EbsOptimizedInfo.MaximumIops - } - if instanceType.EbsInfo.EbsOptimizedInfo.BaselineThroughputInMBps != nil { - extras["ebs_baseline_throughput"] = 
*instanceType.EbsInfo.EbsOptimizedInfo.BaselineThroughputInMBps - } - if instanceType.EbsInfo.EbsOptimizedInfo.MaximumThroughputInMBps != nil { - extras["ebs_maximum_throughput"] = *instanceType.EbsInfo.EbsOptimizedInfo.MaximumThroughputInMBps - } - } - return extras -} - -// readColumnPositions maps column names to their position in the CSV file. -func readColumnPositions(values []string) map[string]int { - columns := make(map[string]int) - for i, v := range values { - columns[v] = i - } - return columns -} diff --git a/services/wastage/recommendation/aws_ec2_instance.go b/services/wastage/recommendation/aws_ec2_instance.go deleted file mode 100644 index e13362af5..000000000 --- a/services/wastage/recommendation/aws_ec2_instance.go +++ /dev/null @@ -1,943 +0,0 @@ -package recommendation - -import ( - "context" - "errors" - "fmt" - types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/labstack/echo/v4" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/recommendation/preferences/ec2instance" - aws "github.com/opengovern/plugin-aws/plugin/proto/src/golang" - "github.com/sashabaranov/go-openai" - "go.uber.org/zap" - "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" - "net/http" - "strconv" - "strings" - "time" -) - -func (s *Service) EC2InstanceRecommendationGrpc( - ctx context.Context, - region string, - instance *aws.EC2Instance, - volumes []*aws.EC2Volume, - metrics map[string]*aws.Metric, - volumeMetrics map[string]*aws.VolumeMetrics, - preferences map[string]*wrapperspb.StringValue, - usageAverageType UsageAverageType, -) (*aws.EC2InstanceRightSizingRecommendation, error) { - var err error - defer func() { - if r := recover(); r != nil { - s.logger.Error(fmt.Sprintf("recovered from panic: %v", r)) - err = fmt.Errorf("recovered from panic: %v", r) - } - }() - if instance == nil { - s.logger.Error("Instance is nil") - } - var monitoring *types.MonitoringState - if instance.Monitoring != nil { - monitoringTmp := types.MonitoringState(instance.Monitoring.GetValue()) - monitoring = &monitoringTmp - } - var placement *entity.EC2Placement - if instance.Placement != nil { - placement = &entity.EC2Placement{ - AvailabilityZone: instance.Placement.AvailabilityZone, - Tenancy: types.Tenancy(instance.Placement.Tenancy), - HashedHostId: instance.Placement.HashedHostId, - } - } - newInstance := entity.EC2Instance{ - HashedInstanceId: instance.HashedInstanceId, - State: types.InstanceStateName(instance.State), - InstanceType: types.InstanceType(instance.InstanceType), - Platform: instance.Platform, - ThreadsPerCore: instance.ThreadsPerCore, - CoreCount: instance.CoreCount, - EbsOptimized: instance.EbsOptimized, - InstanceLifecycle: types.InstanceLifecycleType(instance.InstanceLifecycle), - Monitoring: monitoring, - Placement: placement, - UsageOperation: instance.UsageOperation, - Tenancy: types.Tenancy(instance.Tenancy), - } - - var newVolumes []entity.EC2Volume - for _, v := range volumes { - newVolumes = append(newVolumes, entity.EC2Volume{ - HashedVolumeId: v.HashedVolumeId, - VolumeType: types.VolumeType(v.VolumeType), - Iops: WrappedToInt32(v.Iops), - Size: WrappedToInt32(v.Size), - Throughput: WrappedToFloat64(v.Throughput), - AvailabilityZone: 
WrappedToString(v.AvailabilityZone), - }) - } - for k, m := range metrics { - if m.Metric != nil { - s.logger.Info("Metric Info", zap.String("key", k), zap.Any("length", len(m.Metric))) - } - } - newMetrics := convertMetrics(metrics) - newVolumeMetrics := make(map[string]map[string][]types2.Datapoint) - for k, v := range volumeMetrics { - newVolumeMetrics[k] = convertMetrics(v.Metrics) - } - newPreferences := make(map[string]*string) - for k, v := range preferences { - newPreferences[k] = WrappedToString(v) - } - - s.logger.Info("EC2InstanceRecommendation parameters", zap.String("region", region), zap.Any("instance", newInstance), - zap.Any("volumes", newVolumes), zap.Any("metrics len", len(metrics)), zap.Any("volumeMetrics len", len(newVolumeMetrics)), - zap.Any("preferences", newPreferences), zap.Any("usageAverageType", usageAverageType)) - - result, err := s.EC2InstanceRecommendation(ctx, region, newInstance, newVolumes, newMetrics, newVolumeMetrics, newPreferences, usageAverageType) - if err != nil { - return nil, err - } - return &aws.EC2InstanceRightSizingRecommendation{ - Current: convertRightsizingEC2Instance(&result.Current), - Recommended: convertRightsizingEC2Instance(result.Recommended), - Vcpu: convertUsage(&result.VCPU), - Memory: convertUsage(&result.Memory), - EbsBandwidth: convertUsage(&result.EBSBandwidth), - EbsIops: convertUsage(&result.EBSIops), - NetworkThroughput: convertUsage(&result.NetworkThroughput), - Description: strings.TrimSpace(result.Description), - }, nil -} - -func convertMetrics(metrics map[string]*aws.Metric) map[string][]types2.Datapoint { - newMetrics := make(map[string][]types2.Datapoint) - for k, v := range metrics { - var datapoints []types2.Datapoint - for _, d := range v.Metric { - var timestamp *time.Time - if d.Timestamp != nil { - timestampTmp := d.Timestamp.AsTime() - timestamp = &timestampTmp - } - datapoints = append(datapoints, types2.Datapoint{ - Average: WrappedToFloat64(d.Average), - Maximum: WrappedToFloat64(d.Maximum), - Minimum: WrappedToFloat64(d.Minimum), - SampleCount: WrappedToFloat64(d.SampleCount), - Sum: WrappedToFloat64(d.Sum), - Timestamp: timestamp, - }) - } - newMetrics[k] = datapoints - } - return newMetrics -} - -func convertRightsizingEC2Instance(rightSizing *entity.RightsizingEC2Instance) *aws.RightsizingEC2Instance { - if rightSizing == nil { - return nil - } - return &aws.RightsizingEC2Instance{ - InstanceType: rightSizing.InstanceType, - Region: rightSizing.Region, - Cost: rightSizing.Cost, - CostComponents: rightSizing.CostComponents, - Processor: rightSizing.Processor, - Architecture: rightSizing.Architecture, - Vcpu: rightSizing.VCPU, - Memory: rightSizing.Memory, - EbsBandwidth: rightSizing.EBSBandwidth, - EbsIops: rightSizing.EBSIops, - NetworkThroughput: rightSizing.NetworkThroughput, - EnaSupported: rightSizing.ENASupported, - LicensePrice: rightSizing.LicensePrice, - License: rightSizing.License, - } -} - -func convertUsage(usage *entity.Usage) *aws.Usage { - if usage == nil { - return nil - } - newUsage := aws.Usage{ - Avg: Float64ToWrapper(usage.Avg), - Max: Float64ToWrapper(usage.Max), - Min: Float64ToWrapper(usage.Min), - } - if usage.Last != nil { - last := aws.Datapoint{ - Maximum: Float64ToWrapper(usage.Last.Maximum), - Minimum: Float64ToWrapper(usage.Last.Minimum), - Average: Float64ToWrapper(usage.Last.Average), - SampleCount: Float64ToWrapper(usage.Last.SampleCount), - Sum: Float64ToWrapper(usage.Last.Sum), - } - if usage.Last.Timestamp != nil { - last.Timestamp = timestamppb.New(*usage.Last.Timestamp)
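convertMetrics and convertUsage above rely on WrappedTo* / *ToWrapper helpers defined in another file of the deleted package. From the call sites their shape can be inferred as plain pointer-to-wrapper plumbing; here is a sketch of the two float64 variants (names match the call sites, bodies are inferred rather than the project's verbatim code). The hunk resumes with the tail of convertUsage below.

package sketch

import "google.golang.org/protobuf/types/known/wrapperspb"

// WrappedToFloat64 unwraps an optional proto double into an optional Go float.
func WrappedToFloat64(v *wrapperspb.DoubleValue) *float64 {
	if v == nil {
		return nil
	}
	f := v.GetValue()
	return &f
}

// Float64ToWrapper goes the other way, used when building gRPC responses.
func Float64ToWrapper(v *float64) *wrapperspb.DoubleValue {
	if v == nil {
		return nil
	}
	return wrapperspb.Double(*v)
}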
- } - newUsage.Last = &last - } - return &newUsage -} - -func (s *Service) EC2InstanceRecommendation( - ctx context.Context, - region string, - instance entity.EC2Instance, - volumes []entity.EC2Volume, - metrics map[string][]types2.Datapoint, - volumeMetrics map[string]map[string][]types2.Datapoint, - preferences map[string]*string, - usageAverageType UsageAverageType, -) (*entity.RightSizingRecommendation, error) { - cpuUsage := extractUsage(metrics["CPUUtilization"], usageAverageType) - memoryUsage := extractUsage(metrics["mem_used_percent"], usageAverageType) - networkUsage := extractUsage(sumMergeDatapoints(metrics["NetworkIn"], metrics["NetworkOut"]), usageAverageType) - - var ebsThroughputDatapoints []types2.Datapoint - var ebsIopsDatapoints []types2.Datapoint - for _, v := range volumeMetrics { - ebsThroughputDatapoints = sumMergeDatapoints(sumMergeDatapoints(v["VolumeReadBytes"], v["VolumeWriteBytes"]), ebsThroughputDatapoints) - ebsIopsDatapoints = sumMergeDatapoints(sumMergeDatapoints(v["VolumeReadOps"], v["VolumeWriteOps"]), ebsIopsDatapoints) - } - ebsThroughputUsage := extractUsage(ebsThroughputDatapoints, usageAverageType) - ebsIopsUsage := extractUsage(ebsIopsDatapoints, usageAverageType) - - currentInstanceTypeList, err := s.ec2InstanceRepo.ListByInstanceType(ctx, string(instance.InstanceType), instance.UsageOperation, region) - if err != nil { - err = fmt.Errorf("failed to list instances by types: %s", err.Error()) - return nil, err - } - if len(currentInstanceTypeList) == 0 { - return nil, echo.NewHTTPError(http.StatusNotFound, fmt.Sprintf("instance type not found: %s", string(instance.InstanceType))) - } - currentInstanceType := currentInstanceTypeList[0] - currentCost, currentComponentCost, err := s.costSvc.GetEC2InstanceCost(ctx, region, instance, volumes, metrics) - if err != nil { - err = fmt.Errorf("failed to get current ec2 instance cost: %s", err.Error()) - return nil, err - } - currLicensePrice, err := s.costSvc.EstimateLicensePrice(ctx, instance) - if err != nil { - err = fmt.Errorf("failed to get current ec2 instance license price: %s", err.Error()) - return nil, err - } - - s.logger.Info("Show preferences", zap.Any("preferences", preferences)) - for k, v := range preferences { - s.logger.Info("Show preference", zap.String("key", k), zap.Any("value", v)) - } - - current := entity.RightsizingEC2Instance{ - Region: currentInstanceType.RegionCode, - InstanceType: currentInstanceType.InstanceType, - Processor: currentInstanceType.PhysicalProcessor, - Architecture: currentInstanceType.PhysicalProcessorArch, - VCPU: int64(currentInstanceType.VCpu), - Memory: currentInstanceType.MemoryGB, - NetworkThroughput: currentInstanceType.NetworkPerformance, - ENASupported: currentInstanceType.EnhancedNetworkingSupported, - Cost: currentCost, - CostComponents: currentComponentCost, - LicensePrice: currLicensePrice, - License: instance.UsageOperation, - } - if currentInstanceType.EbsBaselineThroughput != nil { - current.EBSBandwidth = fmt.Sprintf("%.2f MB/s", *currentInstanceType.EbsBaselineThroughput) - } - if currentInstanceType.EbsBaselineIops != nil { - current.EBSIops = fmt.Sprintf("%d io/s", *currentInstanceType.EbsBaselineIops) - } - - //TODO Burst in CPU & Network - //TODO Network: UpTo - - vCPU := instance.ThreadsPerCore * instance.CoreCount - cpuBreathingRoom := int64(0) - if preferences["CPUBreathingRoom"] != nil { - cpuBreathingRoom, _ = strconv.ParseInt(*preferences["CPUBreathingRoom"], 10, 64) - } - memoryBreathingRoom := int64(0) - if 
preferences["MemoryBreathingRoom"] != nil { - memoryBreathingRoom, _ = strconv.ParseInt(*preferences["MemoryBreathingRoom"], 10, 64) - } - neededCPU := float64(vCPU) * (getValueOrZero(cpuUsage.Avg) + float64(cpuBreathingRoom)) / 100.0 - neededMemory := 0.0 - if memoryUsage.Max != nil { - neededMemory = calculateHeadroom(currentInstanceType.MemoryGB*(*memoryUsage.Max), memoryBreathingRoom) - } - neededNetworkThroughput := getValueOrZero(networkUsage.Avg) - if preferences["NetworkBreathingRoom"] != nil { - room, _ := strconv.ParseInt(*preferences["NetworkBreathingRoom"], 10, 64) - neededNetworkThroughput += neededNetworkThroughput * float64(room) / 100.0 - } - - pref := map[string]any{} - for k, v := range preferences { - var vl any - if v == nil || *v == "" { - vl = extractFromInstance(instance, currentInstanceType, region, k) - } else { - vl = *v - } - if ec2instance.PreferenceDBKey[k] == "" || vl == "" { - continue - } - - cond := "=" - if sc, ok := ec2instance.PreferenceSpecialCond[k]; ok { - cond = sc - } - pref[fmt.Sprintf("%s %s ?", ec2instance.PreferenceDBKey[k], cond)] = vl - } - if _, ok := preferences["vCPU"]; !ok { - pref["v_cpu >= ?"] = neededCPU - } - if _, ok := metrics["mem_used_percent"]; ok { - if _, ok := preferences["MemoryGB"]; !ok { - pref["memory_gb >= ?"] = neededMemory - } - } - - excludeBurstable := false - if value, ok := preferences["ExcludeBurstableInstances"]; ok && value != nil { - if *value == "Yes" { - excludeBurstable = true - pref["NOT(instance_type like ?)"] = "t%" - } else if *value == "if current resource is burstable" { - if !strings.HasPrefix(string(instance.InstanceType), "t") { - excludeBurstable = true - pref["NOT(instance_type like ?)"] = "t%" - } - } - } - if value, ok := preferences["UsageOperation"]; ok && value != nil { - if v, ok := ec2instance.UsageOperationHumanToMachine[*value]; ok { - pref["operation = ?"] = v - } else { - delete(pref, "operation = ?") - } - } - // if operation is not provided, limit the results to one with no pre-installed software - if _, ok := pref["operation = ?"]; !ok { - pref["pre_installed_sw = ?"] = "NA" - } - if ebsIopsUsage.Avg != nil && *ebsIopsUsage.Avg > 0 { - pref["ebs_maximum_iops IS NULL OR ebs_maximum_iops >= ?"] = *ebsIopsUsage.Avg - } - // Metric is in bytes so we convert to Mbytes - if ebsThroughputUsage.Avg != nil && *ebsThroughputUsage.Avg > 0 { - pref["ebs_maximum_throughput IS NULL OR ebs_maximum_throughput >= ?"] = *ebsThroughputUsage.Avg / (1024 * 1024) - } - - var recommended *entity.RightsizingEC2Instance - rightSizedInstanceType, err := s.ec2InstanceRepo.GetCheapestByCoreAndNetwork(ctx, neededNetworkThroughput, pref) - if err != nil { - err = fmt.Errorf("failed to find cheapest ec2 instance: %s", err.Error()) - return nil, err - } - if rightSizedInstanceType != nil { - newInstance := instance - newInstance.InstanceType = types.InstanceType(rightSizedInstanceType.InstanceType) - newInstance.UsageOperation = rightSizedInstanceType.Operation - if newInstance.Placement == nil { - newInstance.Placement = &entity.EC2Placement{} - } - if rightSizedInstanceType.Tenancy == "Dedicated" { - newInstance.Placement.Tenancy = types.TenancyDedicated - } else if rightSizedInstanceType.Tenancy == "Host" { - newInstance.Placement.Tenancy = types.TenancyHost - } else { - newInstance.Placement.Tenancy = types.TenancyDefault - } - recommendedCost, recommendedComponentCost, err := s.costSvc.GetEC2InstanceCost(ctx, rightSizedInstanceType.RegionCode, newInstance, volumes, metrics) - if err != nil { - err = 
fmt.Errorf("failed to get recommended ec2 instance cost: %s", err.Error()) - return nil, err - } - recomLicensePrice, err := s.costSvc.EstimateLicensePrice(ctx, newInstance) - if err != nil { - err = fmt.Errorf("failed to get recommended ec2 instance license price: %s", err.Error()) - return nil, err - } - recommended = &entity.RightsizingEC2Instance{ - Region: rightSizedInstanceType.RegionCode, - InstanceType: rightSizedInstanceType.InstanceType, - Processor: rightSizedInstanceType.PhysicalProcessor, - Architecture: rightSizedInstanceType.PhysicalProcessorArch, - VCPU: int64(rightSizedInstanceType.VCpu), - Memory: rightSizedInstanceType.MemoryGB, - NetworkThroughput: rightSizedInstanceType.NetworkPerformance, - ENASupported: rightSizedInstanceType.EnhancedNetworkingSupported, - Cost: recommendedCost, - CostComponents: recommendedComponentCost, - LicensePrice: recomLicensePrice, - License: newInstance.UsageOperation, - } - if rightSizedInstanceType.EbsBaselineThroughput != nil { - recommended.EBSBandwidth = fmt.Sprintf("%.2f MB/s", *rightSizedInstanceType.EbsBaselineThroughput) - } - if rightSizedInstanceType.EbsBaselineIops != nil { - recommended.EBSIops = fmt.Sprintf("%d io/s", *rightSizedInstanceType.EbsBaselineIops) - } - } - - recommendation := entity.RightSizingRecommendation{ - Current: current, - Recommended: recommended, - VCPU: cpuUsage, - EBSBandwidth: ebsThroughputUsage, - EBSIops: ebsIopsUsage, - NetworkThroughput: networkUsage, - Description: "", - } - if len(metrics["mem_used_percent"]) > 0 { - recommendation.Memory = memoryUsage - } - - if preferences["ExcludeUpsizingFeature"] != nil { - if *preferences["ExcludeUpsizingFeature"] == "Yes" { - if recommendation.Recommended != nil && recommendation.Recommended.Cost > recommendation.Current.Cost { - recommendation.Recommended = &recommendation.Current - recommendation.Description = "No recommendation available as upsizing feature is disabled" - return &recommendation, nil - } - } - } - - if rightSizedInstanceType != nil { - recommendation.Description, _ = s.generateEc2InstanceDescription(ctx, instance, region, ¤tInstanceType, rightSizedInstanceType, metrics, excludeBurstable, preferences, neededCPU, neededMemory, neededNetworkThroughput) - } - - recommendation.Description = strings.TrimSpace(recommendation.Description) - - return &recommendation, nil -} -func bpsToMBps(bps *float64) float64 { - if bps == nil { - return 0 - } - return *bps / (1024.0 * 1024.0) -} -func PFloat(v *float64) float64 { - if v == nil { - return 0 - } - return *v -} - -func (s *Service) generateEc2InstanceDescription(ctx context.Context, instance entity.EC2Instance, region string, currentInstanceType, rightSizedInstanceType *model.EC2InstanceType, metrics map[string][]types2.Datapoint, excludeBurstable bool, preferences map[string]*string, neededCPU, neededMemory, neededNetworkThroughput float64) (string, error) { - minCPU, avgCPU, maxCPU := minOfDatapoints(metrics["CPUUtilization"]), averageOfDatapoints(metrics["CPUUtilization"]), maxOfDatapoints(metrics["CPUUtilization"]) - minMemory, avgMemory, maxMemory := minOfDatapoints(metrics["mem_used_percent"]), averageOfDatapoints(metrics["mem_used_percent"]), maxOfDatapoints(metrics["mem_used_percent"]) - networkDatapoints := sumMergeDatapoints(metrics["NetworkIn"], metrics["NetworkOut"]) - _, avgNetwork, _ := minOfDatapoints(networkDatapoints), averageOfDatapoints(networkDatapoints), maxOfDatapoints(networkDatapoints) - - usage := fmt.Sprintf("- %s has %.0f vCPUs. 
Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2f vCPUs. %s has %.0f vCPUs.\n", currentInstanceType.InstanceType, currentInstanceType.VCpu, PFloat(minCPU), PFloat(avgCPU), PFloat(maxCPU), neededCPU, rightSizedInstanceType.InstanceType, rightSizedInstanceType.VCpu) - if len(metrics["mem_used_percent"]) > 0 { - usage += fmt.Sprintf("- %s has %.1fGB Memory. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2fGB Memory. %s has %.1fGB Memory.\n", currentInstanceType.InstanceType, currentInstanceType.MemoryGB, PFloat(minMemory), PFloat(avgMemory), PFloat(maxMemory), neededMemory, rightSizedInstanceType.InstanceType, rightSizedInstanceType.MemoryGB) - } else { - usage += fmt.Sprintf("- %s has %.1fGB Memory. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. %s has %.1fGB Memory.\n", currentInstanceType.InstanceType, currentInstanceType.MemoryGB, rightSizedInstanceType.InstanceType, rightSizedInstanceType.MemoryGB) - } - usage += fmt.Sprintf("- %s's network performance is %s. Throughput over the course of last week is avg=%.2f MB/s, so you only need %.2f MB/s. %s has %s.\n", currentInstanceType.InstanceType, currentInstanceType.NetworkPerformance, bpsToMBps(avgNetwork), neededNetworkThroughput/(1024.0*1024.0), rightSizedInstanceType.InstanceType, rightSizedInstanceType.NetworkPerformance) - - needs := "" - for k, v := range preferences { - if ec2instance.PreferenceDBKey[k] == "" { - continue - } - if v == nil { - vl := extractFromInstance(instance, *currentInstanceType, region, k) - needs += fmt.Sprintf("- You asked %s to be same as the current instance value which is %v\n", k, vl) - } else { - needs += fmt.Sprintf("- You asked %s to be %s\n", k, *v) - } - } - - prompt := fmt.Sprintf(` -I'm giving recommendation on ec2 instance right sizing. Based on user's usage and needs I have concluded that the best option for him is to use %s instead of %s. I need help summarizing the explanation into 280 characters (it's not a tweet! dont use hashtag!) while keeping these rules: -- mention the requirements from user side. -- for those fields which are changing make sure you mention the change. - -Here's usage data: -%s - -User's needs: -%s -`, rightSizedInstanceType.InstanceType, currentInstanceType.InstanceType, usage, needs) - if excludeBurstable { - prompt += "\nBurstable instances are excluded." 
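The prompt assembled above goes to OpenAI through github.com/sashabaranov/go-openai, as the next lines of the hunk show. For reference, the same call shape in standalone form, with the model constant taken from the deleted code and the API key sourced from the environment purely for illustration:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT4TurboPreview,
		Messages: []openai.ChatCompletionMessage{
			// One user message carrying the rendered prompt; the service
			// takes the first choice as the recommendation description.
			{Role: openai.ChatMessageRoleUser, Content: "Summarize: move from m5.xlarge to m5.large"},
		},
	})
	if err != nil {
		panic(err)
	}
	if len(resp.Choices) == 0 {
		panic("empty choices")
	}
	fmt.Println(resp.Choices[0].Message.Content)
}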
- } - resp, err := s.openaiSvc.CreateChatCompletion( - ctx, - openai.ChatCompletionRequest{ - Model: openai.GPT4TurboPreview, - Messages: []openai.ChatCompletionMessage{ - { - Role: openai.ChatMessageRoleUser, - Content: prompt, - }, - }, - }, - ) - - if err != nil { - return "", err - } - - if len(resp.Choices) == 0 { - return "", errors.New("empty choices") - } - return strings.TrimSpace(resp.Choices[0].Message.Content), nil -} - -func extractFromInstance(instance entity.EC2Instance, i model.EC2InstanceType, region string, k string) any { - switch k { - case "InstanceFamily": - return i.InstanceFamily - case "Tenancy": - switch instance.Tenancy { - case types.TenancyDefault: - return "Shared" - case types.TenancyDedicated: - return "Dedicated" - case types.TenancyHost: - return "Host" - default: - return "" - } - case "UsageOperation": - return instance.UsageOperation - case "EBSOptimized": - if instance.EbsOptimized { - return "Yes" - } else { - return "No" - } - case "LicenseModel": - return i.LicenseModel - case "Region": - return region - case "Hypervisor": - return "" //TODO - case "CurrentGeneration": - return i.CurrentGeneration - case "PhysicalProcessor": - return i.PhysicalProcessor - case "ClockSpeed": - return i.ClockSpeed - case "ProcessorArchitecture": - return i.PhysicalProcessorArch - case "SupportedArchitectures": - return "" //TODO - case "ENASupport": - return i.EnhancedNetworkingSupported - case "EncryptionInTransitSupported": - return "" //TODO - case "SupportedRootDeviceTypes": - return "" //TODO - case "Cores": - return "" //TODO - case "Threads": - return "" //TODO - case "vCPU": - return i.VCpu - case "MemoryGB": - return i.MemoryGB - } - return "" -} - -func (s *Service) EBSVolumeRecommendationGrpc( - ctx context.Context, - region string, - volume *aws.EC2Volume, - metrics *aws.VolumeMetrics, - preferences map[string]*wrapperspb.StringValue, - usageAverageType UsageAverageType, -) (*aws.EBSVolumeRecommendation, error) { - var err error - defer func() { - if r := recover(); r != nil { - s.logger.Error(fmt.Sprintf("recovered from panic: %v", r)) - err = fmt.Errorf("recovered from panic: %v", r) - } - }() - newVolume := entity.EC2Volume{ - HashedVolumeId: volume.HashedVolumeId, - VolumeType: types.VolumeType(volume.VolumeType), - Size: WrappedToInt32(volume.Size), - Iops: WrappedToInt32(volume.Iops), - Throughput: WrappedToFloat64(volume.Throughput), - AvailabilityZone: WrappedToString(volume.AvailabilityZone), - } - newMetrics := convertMetrics(metrics.Metrics) - newPreferences := make(map[string]*string) - for k, v := range preferences { - newPreferences[k] = WrappedToString(v) - } - - s.logger.Info("EBSVolumeRecommendation parameters", zap.String("region", region), zap.Any("volume", newVolume), - zap.Any("len metrics", len(newMetrics)), zap.Any("preferences", newPreferences), zap.Any("usageAverageType", usageAverageType)) - - result, err := s.EBSVolumeRecommendation(ctx, region, newVolume, newMetrics, newPreferences, usageAverageType) - if err != nil { - return nil, err - } - newResult := &aws.EBSVolumeRecommendation{ - Current: convertRightsizingEBSVolume(&result.Current), - Recommended: convertRightsizingEBSVolume(result.Recommended), - Iops: convertUsage(&result.IOPS), - Throughput: convertUsage(&result.Throughput), - Description: strings.TrimSpace(result.Description), - } - return newResult, nil -} - -func convertRightsizingEBSVolume(rightSizing *entity.RightsizingEBSVolume) *aws.RightsizingEBSVolume { - if rightSizing == nil { - return nil - } - return 
&aws.RightsizingEBSVolume{ - Tier: string(rightSizing.Tier), - VolumeSize: Int32ToWrapper(rightSizing.VolumeSize), - BaselineIops: rightSizing.BaselineIOPS, - ProvisionedIops: Int32ToWrapper(rightSizing.ProvisionedIOPS), - BaselineThroughput: rightSizing.BaselineThroughput, - ProvisionedThroughput: Float64ToWrapper(rightSizing.ProvisionedThroughput), - Cost: rightSizing.Cost, - CostComponents: rightSizing.CostComponents, - } -} - -func (s *Service) EBSVolumeRecommendation(ctx context.Context, region string, volume entity.EC2Volume, metrics map[string][]types2.Datapoint, preferences map[string]*string, usageAverageType UsageAverageType) (*entity.EBSVolumeRecommendation, error) { - iopsUsage := extractUsage(sumMergeDatapoints(metrics["VolumeReadOps"], metrics["VolumeWriteOps"]), usageAverageType) - throughputUsageBytes := extractUsage(sumMergeDatapoints(metrics["VolumeReadBytes"], metrics["VolumeWriteBytes"]), usageAverageType) - usageStorageThroughputMB := entity.Usage{ - Avg: funcP(throughputUsageBytes.Avg, throughputUsageBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcP(throughputUsageBytes.Min, throughputUsageBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Max: funcP(throughputUsageBytes.Max, throughputUsageBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }), - } - sizeUsage := extractUsage(metrics["disk_used_percent"], usageAverageType) - - s.logger.Info("Show preferences", zap.Any("preferences", preferences)) - for k, v := range preferences { - s.logger.Info("Show preference", zap.String("key", k), zap.Any("value", v)) - } - - size := float64(0) - if size == 0 && volume.Size != nil { - size = float64(*volume.Size) - } - - iopsBreathingRoom := int64(0) - if preferences["IOPSBreathingRoom"] != nil { - iopsBreathingRoom, _ = strconv.ParseInt(*preferences["IOPSBreathingRoom"], 10, 32) - } - throughputBreathingRoom := int64(0) - if preferences["ThroughputBreathingRoom"] != nil { - throughputBreathingRoom, _ = strconv.ParseInt(*preferences["ThroughputBreathingRoom"], 10, 32) - } - sizeBreathingRoom := int64(0) - if preferences["SizeBreathingRoom"] != nil { - sizeBreathingRoom, _ = strconv.ParseInt(*preferences["SizeBreathingRoom"], 10, 32) - } - neededIops := pCalculateHeadroom(iopsUsage.Avg, iopsBreathingRoom) - neededThroughput := pCalculateHeadroom(usageStorageThroughputMB.Avg, throughputBreathingRoom) - neededSize := size - if _, ok := metrics["disk_used_percent"]; ok && sizeUsage.Avg != nil { - neededSize = max(1, neededSize*(*sizeUsage.Avg/100.0)) - neededSize = calculateHeadroom(neededSize, sizeBreathingRoom) - } - - var validTypes []types.VolumeType - if v, ok := preferences["IOPS"]; ok { - if v == nil && volume.Iops != nil { - neededIops = float64(*volume.Iops) - } else { - neededIops, _ = strconv.ParseFloat(*v, 64) - } - } - if v, ok := preferences["Throughput"]; ok { - if v == nil && volume.Throughput != nil { - neededThroughput = *volume.Throughput - } else { - neededThroughput, _ = strconv.ParseFloat(*v, 64) - } - } - if v, ok := preferences["Size"]; ok { - if v == nil && volume.Size != nil { - neededSize = float64(*volume.Size) - } else if v != nil { - neededSize, _ = strconv.ParseFloat(*v, 64) - } - } - - if v, ok := preferences["VolumeFamily"]; ok { - if preferences["VolumeFamily"] == nil { - validTypes = []types.VolumeType{volume.VolumeType} - } else { - switch strings.ToLower(*v) { - case "general purpose", "ssd", "solid state drive", "gp": - validTypes = []types.VolumeType{types.VolumeTypeGp2, 
types.VolumeTypeGp3} - case "io", "io optimized": - validTypes = []types.VolumeType{types.VolumeTypeIo1, types.VolumeTypeIo2} - case "hdd", "sc", "cold", "hard disk drive", "st": - validTypes = []types.VolumeType{types.VolumeTypeSc1, types.VolumeTypeSt1} - } - } - } - - if v, ok := preferences["VolumeType"]; ok { - if preferences["VolumeType"] == nil { - validTypes = []types.VolumeType{volume.VolumeType} - } else { - validTypes = []types.VolumeType{types.VolumeType(*v)} - } - } - - if v, ok := preferences["ExcludeVolumeTypes"]; ok { - if v != nil && len(*v) > 0 { - if len(validTypes) == 0 { - var t types.VolumeType - validTypes = t.Values() - } - - excludeList := strings.Split(*v, ",") - - var newValidTypes []types.VolumeType - for _, o := range validTypes { - ignore := false - for _, e := range excludeList { - if string(o) == e { - ignore = true - } - } - - if ignore { - continue - } - newValidTypes = append(newValidTypes, o) - } - validTypes = newValidTypes - } - } - - volumeCost, currentVolCostComponents, err := s.costSvc.GetEBSVolumeCost(ctx, region, volume, metrics) - if err != nil { - err = fmt.Errorf("failed to get current ebs volume %s cost: %s", volume.HashedVolumeId, err.Error()) - return nil, err - } - - var result = &entity.EBSVolumeRecommendation{ - Current: entity.RightsizingEBSVolume{ - Tier: volume.VolumeType, - VolumeSize: volume.Size, - BaselineIOPS: 0, //TODO-Saleh - ProvisionedIOPS: volume.Iops, - BaselineThroughput: 0, //TODO-Saleh - ProvisionedThroughput: volume.Throughput, - Cost: volumeCost, - CostComponents: currentVolCostComponents, - }, - Recommended: nil, - IOPS: iopsUsage, - Throughput: throughputUsageBytes, - Description: "", - } - if result.Current.ProvisionedIOPS != nil { - result.Current.BaselineIOPS = *result.Current.ProvisionedIOPS - result.Current.ProvisionedIOPS = nil - } - if result.Current.ProvisionedThroughput != nil { - result.Current.BaselineThroughput = *result.Current.ProvisionedThroughput - result.Current.ProvisionedThroughput = nil - } - if volume.VolumeType == types.VolumeTypeGp3 { - provIops := max(int32(result.Current.BaselineIOPS)-model.Gp3BaseIops, 0) - provThroughput := max(result.Current.BaselineThroughput-model.Gp3BaseThroughput, 0) - result.Current.ProvisionedIOPS = &provIops - result.Current.ProvisionedThroughput = &provThroughput - } - if volume.VolumeType == types.VolumeTypeIo1 || volume.VolumeType == types.VolumeTypeIo2 { - provIops := result.Current.BaselineIOPS - result.Current.ProvisionedIOPS = &provIops - result.Current.BaselineIOPS = 0 - } - - newType, newSize, newBaselineIops, newBaselineThroughput, costBreakdown, err := s.ebsVolumeRepo.GetCheapestTypeWithSpecs(ctx, region, int32(neededSize), int32(neededIops), neededThroughput, validTypes) - if err != nil { - if strings.Contains(err.Error(), "no feasible volume types found") { - return result, nil - } - err = fmt.Errorf("failed to find cheapest ebs volume: %s", err.Error()) - return nil, err - } - - result.Recommended = &entity.RightsizingEBSVolume{ - Tier: "", - VolumeSize: utils.GetPointer(newSize), - BaselineIOPS: newBaselineIops, - ProvisionedIOPS: nil, - BaselineThroughput: newBaselineThroughput, - ProvisionedThroughput: nil, - Cost: 0, - } - result.Description = fmt.Sprintf("- cost breakdown: %s\n", costBreakdown) - newVolume := volume - result.Recommended.Tier = newType - newVolume.VolumeType = newType - if newType != volume.VolumeType { - result.Description += fmt.Sprintf("- change your volume from %s to %s\n", volume.VolumeType, newType) - } - - if int32(neededSize) 
!= getValueOrZero(volume.Size) { - result.Recommended.VolumeSize = utils.GetPointer(int32(neededSize)) - newVolume.Size = utils.GetPointer(int32(neededSize)) - result.Description += fmt.Sprintf("- change volume size from %d to %d\n", getValueOrZero(volume.Size), int32(neededSize)) - } - - if newType == types.VolumeTypeIo1 || newType == types.VolumeTypeIo2 { - avgIOps := int32(neededIops) - result.Recommended.ProvisionedIOPS = &avgIOps - newVolume.Iops = &avgIOps - - if volume.Iops == nil { - result.Description += fmt.Sprintf("- add provisioned iops: %d\n", avgIOps) - } else if avgIOps > *volume.Iops { - result.Description += fmt.Sprintf("- increase provisioned iops from %d to %d\n", *volume.Iops, avgIOps) - } else if avgIOps < *volume.Iops { - result.Description += fmt.Sprintf("- decrease provisioned iops from %d to %d\n", *volume.Iops, avgIOps) - } else { - result.Recommended.ProvisionedIOPS = nil - newVolume.Iops = volume.Iops - } - } - - if newType == types.VolumeTypeGp3 { - provIops := max(int32(neededIops)-model.Gp3BaseIops, 0) - result.Recommended.ProvisionedIOPS = &provIops - newVolume.Iops = &provIops - - oldProvIops := int32(0) - if volume.Iops != nil { - oldProvIops = *volume.Iops - if volume.VolumeType != types.VolumeTypeGp3 { - oldProvIops -= model.Gp3BaseIops - oldProvIops = max(oldProvIops, 0) - } - } - - if volume.Iops == nil { - result.Description += fmt.Sprintf("- add provisioned iops: %d\n", provIops) - } else if provIops > oldProvIops { - result.Description += fmt.Sprintf("- increase provisioned iops from %d to %d\n", oldProvIops, provIops) - } else if provIops < oldProvIops { - result.Description += fmt.Sprintf("- decrease provisioned iops from %d to %d\n", oldProvIops, provIops) - } else { - result.Recommended.ProvisionedIOPS = nil - newVolume.Iops = volume.Iops - } - } - - if newType == types.VolumeTypeGp3 { - provThroughput := max(neededThroughput-model.Gp3BaseThroughput, 0) - result.Recommended.ProvisionedThroughput = &provThroughput - newVolume.Throughput = &provThroughput - - oldProvThroughput := float64(0) - if volume.Throughput != nil { - oldProvThroughput = *volume.Throughput - if volume.VolumeType != types.VolumeTypeGp3 { - oldProvThroughput -= model.Gp3BaseThroughput - oldProvThroughput = max(oldProvThroughput, 0) - } - } - - if volume.Throughput == nil { - result.Description += fmt.Sprintf("- add provisioned throughput: %.2f\n", provThroughput) - } else if provThroughput > oldProvThroughput { - result.Description += fmt.Sprintf("- increase provisioned throughput from %.2f to %.2f\n", oldProvThroughput, provThroughput) - } else if provThroughput < oldProvThroughput { - result.Description += fmt.Sprintf("- decrease provisioned throughput from %.2f to %.2f\n", oldProvThroughput, provThroughput) - } else { - result.Recommended.ProvisionedThroughput = nil - newVolume.Throughput = volume.Throughput - } - } - - newVolumeCost, newVolCostComponents, err := s.costSvc.GetEBSVolumeCost(ctx, region, newVolume, metrics) - if err != nil { - err = fmt.Errorf("failed to get recommended ebs volume %s cost: %s", newVolume.HashedVolumeId, err.Error()) - return nil, err - } - result.Recommended.Cost = newVolumeCost - result.Recommended.CostComponents = newVolCostComponents - - if preferences["ExcludeUpsizingFeature"] != nil { - if *preferences["ExcludeUpsizingFeature"] == "Yes" { - if result.Recommended != nil && result.Recommended.Cost > result.Current.Cost { - result.Recommended = &result.Current - result.Description = "No recommendation available as upsizing feature is 
disabled" - } - } - } - - result.Description = strings.TrimSpace(result.Description) - - return result, nil -} - -func WrappedToInt32(v *wrapperspb.Int32Value) *int32 { - if v == nil { - return nil - } - tmp := v.GetValue() - return &tmp -} - -func WrappedToFloat64(v *wrapperspb.DoubleValue) *float64 { - if v == nil { - return nil - } - tmp := v.GetValue() - return &tmp -} - -func WrappedToString(v *wrapperspb.StringValue) *string { - if v == nil { - return nil - } - tmp := v.GetValue() - if tmp == "" { - return nil - } - return &tmp -} - -func Int32ToWrapper(v *int32) *wrapperspb.Int32Value { - if v == nil { - return nil - } - return wrapperspb.Int32(*v) -} - -func Float64ToWrapper(v *float64) *wrapperspb.DoubleValue { - if v == nil { - return nil - } - return wrapperspb.Double(*v) -} - -func StringToWrapper(v *string) *wrapperspb.StringValue { - if v == nil { - return nil - } - return wrapperspb.String(*v) -} diff --git a/services/wastage/recommendation/aws_rds.go b/services/wastage/recommendation/aws_rds.go deleted file mode 100644 index 0e95c37cc..000000000 --- a/services/wastage/recommendation/aws_rds.go +++ /dev/null @@ -1,864 +0,0 @@ -package recommendation - -import ( - "context" - "errors" - "fmt" - types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/labstack/echo/v4" - "github.com/opengovern/opengovernance/pkg/utils" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/recommendation/preferences/aws_rds" - aws "github.com/opengovern/plugin-aws/plugin/proto/src/golang" - "github.com/sashabaranov/go-openai" - "go.uber.org/zap" - "google.golang.org/protobuf/types/known/wrapperspb" - "math" - "net/http" - "strconv" - "strings" -) - -type awsRdsDbType struct { - Engine string - Edition string -} - -var dbTypeMap = map[string]awsRdsDbType{ - "aurora": {"Aurora MySQL", ""}, - "aurora-mysql": {"Aurora MySQL", ""}, - "aurora-postgresql": {"Aurora PostgreSQL", ""}, - "mariadb": {"MariaDB", ""}, - "mysql": {"MySQL", ""}, - "postgres": {"PostgreSQL", ""}, - "oracle-se": {"Oracle", "Standard"}, - "oracle-se1": {"Oracle", "Standard One"}, - "oracle-se2": {"Oracle", "Standard Two"}, - "oracle-se2-cdb": {"Oracle", "Standard Two"}, - "oracle-ee": {"Oracle", "Enterprise"}, - "oracle-ee-cdb": {"Oracle", "Enterprise"}, - "sqlserver-se": {"SQL Server", "Standard"}, - "sqlserver-ee": {"SQL Server", "Enterprise"}, - "sqlserver-ex": {"SQL Server", "Express"}, - "sqlserver-web": {"SQL Server", "Web"}, -} - -func awsRdsDbTypeToAPIDbType(engine, edition string) string { - for k, v := range dbTypeMap { - if strings.ToLower(v.Engine) == strings.ToLower(engine) && (v.Edition == "" || strings.ToLower(v.Edition) == strings.ToLower(edition)) { - return k - } - } - return "" -} - -func calculateHeadroom(needed float64, percent int64) float64 { - return needed / (1.0 - (float64(percent) / 100.0)) -} - -func pCalculateHeadroom(needed *float64, percent int64) float64 { - if needed == nil { - return 0.0 - } - return *needed / (1.0 - (float64(percent) / 100.0)) -} - -func (s *Service) AwsRdsRecommendationGrpc( - ctx context.Context, - region string, - rdsInstance *aws.RDSInstance, - metrics map[string]*aws.Metric, - preferences map[string]*wrapperspb.StringValue, - usageAverageType UsageAverageType, -) (*aws.RDSInstanceRightSizingRecommendation, error) { - newRdsInstance := entity.AwsRds{ - HashedInstanceId: rdsInstance.HashedInstanceId, - 
AvailabilityZone: rdsInstance.AvailabilityZone, - InstanceType: rdsInstance.InstanceType, - Engine: rdsInstance.Engine, - EngineVersion: rdsInstance.EngineVersion, - LicenseModel: rdsInstance.LicenseModel, - BackupRetentionPeriod: WrappedToInt32(rdsInstance.BackupRetentionPeriod), - ClusterType: entity.AwsRdsClusterType(rdsInstance.ClusterType), - PerformanceInsightsEnabled: rdsInstance.PerformanceInsightsEnabled, - PerformanceInsightsRetentionPeriod: WrappedToInt32(rdsInstance.PerformanceInsightsRetentionPeriod), - StorageType: WrappedToString(rdsInstance.StorageType), - StorageSize: WrappedToInt32(rdsInstance.StorageSize), - StorageIops: WrappedToInt32(rdsInstance.StorageIops), - StorageThroughput: WrappedToFloat64(rdsInstance.StorageThroughput), - } - - newMetrics := convertMetrics(metrics) - newPreferences := make(map[string]*string) - for k, v := range preferences { - newPreferences[k] = WrappedToString(v) - } - - result, err := s.AwsRdsRecommendation(ctx, region, newRdsInstance, newMetrics, newPreferences, usageAverageType) - if err != nil { - return nil, err - } - - return &aws.RDSInstanceRightSizingRecommendation{ - Current: convertRightsizingRDSInstance(&result.Current), - Recommended: convertRightsizingRDSInstance(result.Recommended), - Vcpu: convertUsage(&result.VCPU), - FreeMemoryBytes: convertUsage(&result.FreeMemoryBytes), - FreeStorageBytes: convertUsage(&result.FreeStorageBytes), - NetworkThroughputBytes: convertUsage(&result.NetworkThroughputBytes), - StorageIops: convertUsage(&result.StorageIops), - StorageThroughput: convertUsage(&result.StorageThroughput), - VolumeBytesUsed: convertUsage(&result.VolumeBytesUsed), - Description: result.Description, - }, nil -} - -func convertRightsizingRDSInstance(rightSizing *entity.RightsizingAwsRds) *aws.RightsizingAwsRds { - if rightSizing == nil { - return nil - } - return &aws.RightsizingAwsRds{ - Region: rightSizing.Region, - InstanceType: rightSizing.InstanceType, - Engine: rightSizing.Engine, - EngineVersion: rightSizing.EngineVersion, - ClusterType: string(rightSizing.ClusterType), - Processor: rightSizing.Processor, - Architecture: rightSizing.Architecture, - Vcpu: rightSizing.VCPU, - MemoryGb: rightSizing.MemoryGb, - StorageType: StringToWrapper(rightSizing.StorageType), - StorageSize: Int32ToWrapper(rightSizing.StorageSize), - StorageIops: Int32ToWrapper(rightSizing.StorageIops), - StorageThroughput: Float64ToWrapper(rightSizing.StorageThroughput), - Cost: rightSizing.Cost, - CostComponents: rightSizing.CostComponents, - ComputeCost: rightSizing.ComputeCost, - ComputeCostComponents: rightSizing.ComputeCostComponents, - StorageCost: rightSizing.StorageCost, - StorageCostComponents: rightSizing.StorageCostComponents, - } -} - -func (s *Service) AwsRdsRecommendation( - ctx context.Context, - region string, - rdsInstance entity.AwsRds, - metrics map[string][]types2.Datapoint, - preferences map[string]*string, - usageAverageType UsageAverageType, -) (*entity.AwsRdsRightsizingRecommendation, error) { - usageCpuPercent := extractUsage(metrics["CPUUtilization"], usageAverageType) - usageFreeMemoryBytes := extractUsage(metrics["FreeableMemory"], usageAverageType) - usageFreeStorageBytes := extractUsage(metrics["FreeStorageSpace"], usageAverageType) - usageVolumeBytesUsed := extractUsage(metrics["VolumeBytesUsed"], usageAverageType) - usageNetworkThroughputBytes := extractUsage(sumMergeDatapoints(metrics["NetworkReceiveThroughput"], metrics["NetworkTransmitThroughput"]), usageAverageType) - usageStorageIops := 
extractUsage(sumMergeDatapoints(metrics["ReadIOPS"], metrics["WriteIOPS"]), usageAverageType) - usageStorageThroughputBytes := extractUsage(sumMergeDatapoints(metrics["ReadThroughput"], metrics["WriteThroughput"]), usageAverageType) - usageStorageThroughputMB := entity.Usage{ - Avg: funcP(usageStorageThroughputBytes.Avg, usageStorageThroughputBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcP(usageStorageThroughputBytes.Min, usageStorageThroughputBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Max: funcP(usageStorageThroughputBytes.Max, usageStorageThroughputBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }), - } - - awsRdsDbKind, ok := dbTypeMap[strings.ToLower(rdsInstance.Engine)] - if !ok { - s.logger.Warn("rds engine not found", zap.String("engine", rdsInstance.Engine)) - awsRdsDbKind = awsRdsDbType{strings.ToLower(rdsInstance.Engine), ""} - } - - currentInstanceTypeList, err := s.awsRDSDBInstanceRepo.ListByInstanceType(ctx, region, rdsInstance.InstanceType, awsRdsDbKind.Engine, awsRdsDbKind.Edition, string(rdsInstance.ClusterType)) - if err != nil { - return nil, err - } - if len(currentInstanceTypeList) == 0 { - s.logger.Error("rds instance type not found", zap.String("instance_type", rdsInstance.InstanceType)) - return nil, echo.NewHTTPError(http.StatusNotFound, fmt.Sprintf("rds instance type %s with %s engine not found", rdsInstance.InstanceType, rdsInstance.Engine)) - } - currentInstanceRow := currentInstanceTypeList[0] - - if strings.Contains(strings.ToLower(rdsInstance.Engine), "aurora") { - rdsInstance.StorageSize = utils.GetPointer(int32(math.Ceil(getValueOrZero(usageVolumeBytesUsed.Avg) / (1024 * 1024 * 1024)))) - if usageVolumeBytesUsed.Last.Maximum != nil { - rdsInstance.StorageSize = utils.GetPointer(int32(math.Ceil(getValueOrZero(usageVolumeBytesUsed.Last.Maximum) / (1024 * 1024 * 1024)))) - } - rdsInstance.StorageIops = nil - rdsInstance.StorageThroughput = nil - } - - currentComputeCost, currentComputeCostComponents, err := s.costSvc.GetRDSComputeCost(ctx, region, rdsInstance, metrics) - if err != nil { - s.logger.Error("failed to get rds compute cost", zap.Error(err)) - return nil, err - } - currentStorageCost, currentStorageCostComponents, err := s.costSvc.GetRDSStorageCost(ctx, region, rdsInstance, metrics) - if err != nil { - s.logger.Error("failed to get rds storage cost", zap.Error(err)) - return nil, err - } - - currentCostComponents := make(map[string]float64) - for k, v := range currentComputeCostComponents { - currentCostComponents[k] = v - } - for k, v := range currentStorageCostComponents { - currentCostComponents[k] = v - } - - current := entity.RightsizingAwsRds{ - Region: region, - InstanceType: rdsInstance.InstanceType, - Engine: rdsInstance.Engine, - EngineVersion: rdsInstance.EngineVersion, - ClusterType: rdsInstance.ClusterType, - Architecture: currentInstanceRow.ProcessorArchitecture, - Processor: currentInstanceRow.PhysicalProcessor, - VCPU: int64(currentInstanceRow.VCpu), - MemoryGb: int64(currentInstanceRow.MemoryGb), - StorageType: rdsInstance.StorageType, - StorageSize: rdsInstance.StorageSize, - StorageIops: rdsInstance.StorageIops, - StorageThroughput: rdsInstance.StorageThroughput, - - Cost: currentComputeCost + currentStorageCost, - CostComponents: currentCostComponents, - ComputeCost: currentComputeCost, - ComputeCostComponents: currentComputeCostComponents, - StorageCost: currentStorageCost, - StorageCostComponents: currentStorageCostComponents, - } - - neededVCPU := 
(getValueOrZero(usageCpuPercent.Avg) / 100.0) * currentInstanceRow.VCpu - if v, ok := preferences["CpuBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid CpuBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid CpuBreathingRoom value: %s", *v)) - } - neededVCPU = calculateHeadroom(neededVCPU, vPercent) - } - usageFreeMemoryBytesMin := 0.0 - if usageFreeMemoryBytes.Min != nil { - usageFreeMemoryBytesMin = *usageFreeMemoryBytes.Min - } else if usageFreeMemoryBytes.Avg != nil { - usageFreeMemoryBytesMin = *usageFreeMemoryBytes.Avg - } - neededMemoryGb := currentInstanceRow.MemoryGb - (usageFreeMemoryBytesMin / (1024 * 1024 * 1024)) - if v, ok := preferences["MemoryBreathingRoom"]; ok { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid MemoryBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid MemoryBreathingRoom value: %s", *v)) - } - neededMemoryGb = calculateHeadroom(neededMemoryGb, vPercent) - } - neededNetworkThroughput := 0.0 - if usageNetworkThroughputBytes.Avg != nil { - neededNetworkThroughput = *usageNetworkThroughputBytes.Avg - } - if v, ok := preferences["NetworkBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid NetworkBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid NetworkBreathingRoom value: %s", *v)) - } - neededNetworkThroughput = calculateHeadroom(neededNetworkThroughput, vPercent) - } - - neededStorageSize := int32(0) - if rdsInstance.StorageSize != nil { - usageFreeStorageBytesMin := 0.0 - if usageFreeStorageBytes.Min != nil { - usageFreeStorageBytesMin = *usageFreeStorageBytes.Min - } else if usageFreeStorageBytes.Avg != nil { - usageFreeStorageBytesMin = *usageFreeStorageBytes.Avg - } - neededStorageSizeFloat := float64(*rdsInstance.StorageSize) - (usageFreeStorageBytesMin / (1024 * 1024 * 1024)) - if strings.Contains(strings.ToLower(rdsInstance.Engine), "aurora") { - if usageVolumeBytesUsed.Max != nil { - neededStorageSizeFloat = *usageVolumeBytesUsed.Max / (1024 * 1024 * 1024) - } else if usageVolumeBytesUsed.Avg != nil { - neededStorageSizeFloat = *usageVolumeBytesUsed.Avg / (1024 * 1024 * 1024) - } - } - if v, ok := preferences["StorageSizeBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid StorageSizeBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid StorageSizeBreathingRoom value: %s", *v)) - } - neededStorageSizeFloat = calculateHeadroom(neededStorageSizeFloat, vPercent) - } - neededStorageSize = int32(math.Ceil(neededStorageSizeFloat)) - } - neededStorageIops := int32(0) - if usageStorageIops.Avg != nil { - neededStorageIopsFloat := *usageStorageIops.Avg - if v, ok := preferences["StorageIopsBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid StorageIopsBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid StorageIopsBreathingRoom value: %s", *v)) - } - neededStorageIopsFloat = calculateHeadroom(neededStorageIopsFloat, vPercent) - } - neededStorageIops = int32(neededStorageIopsFloat) 
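// The breathing-room preferences above all feed calculateHeadroom (defined
// earlier in this file), which inflates observed usage so the recommendation
// keeps spare capacity. A minimal sketch with hypothetical numbers; the
// helper name exampleIopsHeadroom is illustrative only:
func exampleIopsHeadroom() {
	// An observed average of 300 IOPS with a 10% breathing-room preference
	// targets 300 / (1 - 10/100) = 333.33 IOPS, i.e. roughly 11% above the
	// measured average.
	needed := calculateHeadroom(300, 10)
	fmt.Printf("needed IOPS = %.2f\n", needed)
}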
- } - neededStorageThroughputMB := 0.0 - if usageStorageThroughputMB.Avg != nil { - neededStorageThroughputMB = *usageStorageThroughputMB.Avg - if v, ok := preferences["StorageThroughputBreathingRoom"]; ok { - vPercent, err := strconv.ParseInt(*v, 10, 64) - if err != nil { - s.logger.Error("invalid StorageThroughputBreathingRoom value", zap.String("value", *v)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid StorageThroughputBreathingRoom value: %s", *v)) - } - neededStorageThroughputMB = calculateHeadroom(neededStorageThroughputMB, vPercent) - } - } - - instancePref := map[string]any{} - for k, v := range preferences { - var vl any - if v == nil { - vl = extractFromRdsInstance(rdsInstance, currentInstanceRow, region, k) - } else { - vl = *v - } - if _, ok := aws_rds.PreferenceInstanceDBKey[k]; !ok { - continue - } - if aws_rds.PreferenceInstanceDBKey[k] == "" { - continue - } - - cond := "=" - if sc, ok := aws_rds.PreferenceInstanceSpecialCond[k]; ok { - cond = sc - } - instancePref[fmt.Sprintf("%s %s ?", aws_rds.PreferenceInstanceDBKey[k], cond)] = vl - } - if _, ok := preferences["vCPU"]; !ok { - instancePref["v_cpu >= ?"] = neededVCPU - } - if _, ok := preferences["MemoryGB"]; !ok { - instancePref["memory_gb >= ?"] = neededMemoryGb - } - if _, ok := preferences["NetworkThroughput"]; !ok { - instancePref["network_throughput IS NULL OR network_throughput >= ?"] = neededNetworkThroughput - } - if v, ok := instancePref["database_engine = ?"]; ok { - kind := dbTypeMap[strings.ToLower(v.(string))] - instancePref["database_engine = ?"] = kind.Engine - if kind.Edition != "" { - instancePref["database_edition = ?"] = kind.Edition - } - } - - excluedBurstable := false - if _, ok := instancePref["instance_type = ?"]; !ok { - if value, ok := preferences["ExcludeBurstableInstances"]; ok && value != nil { - if *value == "Yes" { - excluedBurstable = true - instancePref["NOT(instance_type like ?)"] = "db.t%" - } else if *value == "if current resource is burstable" { - if !strings.HasPrefix(rdsInstance.InstanceType, "db.t") { - excluedBurstable = true - instancePref["NOT(instance_type like ?)"] = "db.t%" - } - } - } - } - - rightSizedInstanceRow, err := s.awsRDSDBInstanceRepo.GetCheapestByPref(ctx, instancePref) - if err != nil { - s.logger.Error("failed to get rds instance type", zap.Error(err)) - return nil, err - } - - var resultEngine, resultEdition string - var resultClusterType entity.AwsRdsClusterType - if rightSizedInstanceRow != nil { - resultEngine = rightSizedInstanceRow.DatabaseEngine - resultEdition = rightSizedInstanceRow.DatabaseEdition - resultClusterType = entity.AwsRdsClusterType(rightSizedInstanceRow.DeploymentOption) - } else { - resultEngine = awsRdsDbKind.Engine - resultEdition = awsRdsDbKind.Edition - resultClusterType = rdsInstance.ClusterType - } - // Aurora instance types storage configs are very different from other RDS instance types - isResultAurora := !((rightSizedInstanceRow != nil && !strings.Contains(strings.ToLower(rightSizedInstanceRow.InstanceType), "aurora")) || (rightSizedInstanceRow == nil && !strings.Contains(strings.ToLower(currentInstanceRow.InstanceType), "aurora"))) - - var rightSizedStorageRow *model.RDSDBStorage - - var validTypes []model.RDSDBStorageVolumeType - if v, ok := preferences["StorageType"]; ok { - if v == nil { - st := extractFromRdsInstance(rdsInstance, currentInstanceRow, region, "StorageType") - volType := model.RDSDBStorageEBSTypeToVolumeType[st.(string)] - validTypes = append(validTypes, volType) - } else if *v 
!= "" { - validTypes = append(validTypes, model.RDSDBStorageVolumeType(*v)) - } - } - - if v, ok := preferences["ExcludeRDSVolumeTypes"]; ok { - if v != nil && len(*v) > 0 { - if len(validTypes) == 0 { - for _, v := range model.RDSDBStorageEBSTypeToVolumeType { - validTypes = append(validTypes, v) - } - } - - excludeList := strings.Split(*v, ",") - - var newValidTypes []model.RDSDBStorageVolumeType - for _, o := range validTypes { - ignore := false - for _, e := range excludeList { - if string(o) == e { - ignore = true - } - } - - if ignore { - continue - } - newValidTypes = append(newValidTypes, o) - } - validTypes = newValidTypes - } - } - - var resSize, resIops int32 - var resThroughputMB float64 - var costBreakdown string - rightSizedStorageRow, resSize, resIops, resThroughputMB, costBreakdown, err = s.awsRDSDBStorageRepo.GetCheapestBySpecs(ctx, region, resultEngine, resultEdition, resultClusterType, neededStorageSize, neededStorageIops, neededStorageThroughputMB, validTypes) - if err != nil { - s.logger.Error("failed to get rds storage type", zap.Error(err)) - return nil, err - } - neededStorageSize = resSize - if !isResultAurora { - neededStorageIops = resIops - neededStorageThroughputMB = resThroughputMB - } - - var recommended *entity.RightsizingAwsRds - var newInstance entity.AwsRds - if rightSizedInstanceRow != nil { - newInstance = rdsInstance - newInstance.InstanceType = rightSizedInstanceRow.InstanceType - newInstance.ClusterType = entity.AwsRdsClusterType(rightSizedInstanceRow.DeploymentOption) - for k, v := range dbTypeMap { - if strings.ToLower(v.Engine) == strings.ToLower(rightSizedInstanceRow.DatabaseEngine) && (v.Edition == "" || strings.ToLower(v.Edition) == strings.ToLower(rightSizedInstanceRow.DatabaseEdition)) { - newInstance.Engine = k - break - } - } - newInstance.LicenseModel = rightSizedInstanceRow.LicenseModel - - recommended = &entity.RightsizingAwsRds{ - Region: rightSizedInstanceRow.RegionCode, - InstanceType: rightSizedInstanceRow.InstanceType, - Engine: awsRdsDbTypeToAPIDbType(rightSizedInstanceRow.DatabaseEngine, rightSizedInstanceRow.DatabaseEdition), - EngineVersion: newInstance.EngineVersion, - ClusterType: newInstance.ClusterType, - Architecture: rightSizedInstanceRow.ProcessorArchitecture, - Processor: rightSizedInstanceRow.PhysicalProcessor, - VCPU: int64(rightSizedInstanceRow.VCpu), - MemoryGb: int64(rightSizedInstanceRow.MemoryGb), - Cost: 0, - ComputeCost: 0, - StorageCost: 0, - } - if rightSizedStorageRow == nil { - recommended.StorageType = newInstance.StorageType - recommended.StorageSize = newInstance.StorageSize - recommended.StorageIops = newInstance.StorageIops - recommended.StorageThroughput = newInstance.StorageThroughput - } - } else { - newInstance = rdsInstance - } - if rightSizedStorageRow != nil { - if recommended == nil { - recommended = &entity.RightsizingAwsRds{ - Region: region, - InstanceType: currentInstanceRow.InstanceType, - Engine: awsRdsDbTypeToAPIDbType(currentInstanceRow.DatabaseEngine, currentInstanceRow.DatabaseEdition), - EngineVersion: rdsInstance.EngineVersion, - ClusterType: rdsInstance.ClusterType, - Architecture: currentInstanceRow.ProcessorArchitecture, - Processor: currentInstanceRow.PhysicalProcessor, - VCPU: int64(currentInstanceRow.VCpu), - MemoryGb: int64(currentInstanceRow.MemoryGb), - Cost: currentComputeCost + currentStorageCost, - ComputeCost: currentComputeCost, - StorageCost: currentStorageCost, - } - } - ebsType := model.RDSDBStorageVolumeTypeToEBSType[rightSizedStorageRow.VolumeType] - 
recommended.StorageType = &ebsType - newInstance.StorageType = &ebsType - - recommended.StorageSize = &neededStorageSize - newInstance.StorageSize = &neededStorageSize - - if ebsType == "io1" || ebsType == "io2" || ebsType == "gp3" { - recommended.StorageIops = &neededStorageIops - newInstance.StorageIops = &neededStorageIops - } else { - recommended.StorageIops = nil - newInstance.StorageIops = nil - } - if ebsType == "gp3" { - recommended.StorageThroughput = &neededStorageThroughputMB - newInstance.StorageThroughput = &neededStorageThroughputMB - } else { - recommended.StorageThroughput = nil - newInstance.StorageThroughput = nil - } - } - - if recommended != nil { - if rightSizedInstanceRow != nil { - recommendedComputeCost, recommendedComputeCostComponents, err := s.costSvc.GetRDSComputeCost(ctx, region, newInstance, metrics) - if err != nil { - s.logger.Error("failed to get rds instance cost", zap.Error(err)) - return nil, err - } - recommended.ComputeCost = recommendedComputeCost - recommended.ComputeCostComponents = recommendedComputeCostComponents - } - - recommendedStorageCost, recommendedStorageCostComponents, err := s.costSvc.GetRDSStorageCost(ctx, region, newInstance, metrics) - if err != nil { - s.logger.Error("failed to get rds instance cost", zap.Error(err)) - return nil, err - } - recommended.StorageCost = recommendedStorageCost - recommended.StorageCostComponents = recommendedStorageCostComponents - - costComponents := make(map[string]float64) - for k, v := range recommended.ComputeCostComponents { - costComponents[k] = v - } - for k, v := range recommended.StorageCostComponents { - costComponents[k] = v - } - - recommended.Cost = recommended.ComputeCost + recommended.StorageCost - recommended.CostComponents = costComponents - } - - recommendation := entity.AwsRdsRightsizingRecommendation{ - Current: current, - Recommended: recommended, - - VCPU: usageCpuPercent, - StorageIops: usageStorageIops, - FreeMemoryBytes: usageFreeMemoryBytes, - NetworkThroughputBytes: usageNetworkThroughputBytes, - FreeStorageBytes: usageFreeStorageBytes, - StorageThroughput: usageStorageThroughputBytes, - VolumeBytesUsed: usageVolumeBytesUsed, - - Description: "", - } - - if preferences["ExcludeUpsizingFeature"] != nil { - if *preferences["ExcludeUpsizingFeature"] == "Yes" { - if recommendation.Recommended != nil && recommendation.Recommended.Cost > recommendation.Current.Cost { - recommendation.Recommended = &recommendation.Current - recommendation.Description += "\nNo recommendation available as upsizing feature is disabled" - return &recommendation, nil - } - } - } - - var computeDescription, storageDescription string - if rightSizedInstanceRow != nil { - computeDescription, err = s.generateRdsInstanceComputeDescription(ctx, rdsInstance, region, &currentInstanceRow, - rightSizedInstanceRow, metrics, excluedBurstable, preferences, neededVCPU, neededMemoryGb, neededNetworkThroughput, usageAverageType) - if err != nil { - s.logger.Error("failed to generate rds instance compute description", zap.Error(err)) - } - } - if rightSizedStorageRow != nil && recommended != nil { - storageDescription, err = s.generateRdsInstanceStorageDescription(ctx, rdsInstance, region, - *rdsInstance.StorageType, rdsInstance.StorageSize, rdsInstance.StorageIops, rdsInstance.StorageThroughput, - *recommended.StorageType, recommended.StorageSize, recommended.StorageIops, recommended.StorageThroughput, metrics, - preferences, neededStorageSize, neededStorageIops, neededStorageThroughputMB, usageAverageType) - if err != nil { - 
s.logger.Error("failed to generate rds storage compute description", zap.Error(err)) - } - storageDescription += "\nStorage cost break down: " + costBreakdown - } - - recommendation.Description = strings.TrimSpace(computeDescription + "\n" + storageDescription) - return &recommendation, nil -} - -func extractFromRdsInstance(instance entity.AwsRds, i model.RDSDBInstance, region string, k string) any { - switch k { - case "Region": - return region - case "vCPU": - return i.VCpu - case "MemoryGB": - return i.MemoryGb - case "InstanceType": - return instance.InstanceType - case "Engine": - return instance.Engine - case "ClusterType": - return instance.ClusterType - case "StorageType": - return instance.StorageType - case "InstanceFamily": - return i.InstanceFamily - case "LicenseModel": - return i.LicenseModel - } - return "" -} - -func (s *Service) generateRdsInstanceComputeDescription(ctx context.Context, rdsInstance entity.AwsRds, region string, currentInstanceType, - rightSizedInstanceType *model.RDSDBInstance, metrics map[string][]types2.Datapoint, excludeBurstable bool, - preferences map[string]*string, neededCPU, neededMemory, neededNetworkThroughput float64, usageAverageType UsageAverageType) (string, error) { - usageCpuPercent := extractUsage(metrics["CPUUtilization"], usageAverageType) - usageFreeMemoryBytes := extractUsage(metrics["FreeableMemory"], usageAverageType) - usageNetworkThroughputBytes := extractUsage(sumMergeDatapoints(metrics["NetworkReceiveThroughput"], metrics["NetworkTransmitThroughput"]), usageAverageType) - - usage := fmt.Sprintf("- %s has %.1f vCPUs. Usage over the course of last week is ", currentInstanceType.InstanceType, currentInstanceType.VCpu) - if usageCpuPercent.Min == nil && usageCpuPercent.Avg == nil && usageCpuPercent.Max == nil { - usage += "not available." - } else { - if usageCpuPercent.Min != nil { - usage += fmt.Sprintf("min=%.2f%%, ", *usageCpuPercent.Min) - } - if usageCpuPercent.Avg != nil { - usage += fmt.Sprintf("avg=%.2f%%, ", *usageCpuPercent.Avg) - } - if usageCpuPercent.Max != nil { - usage += fmt.Sprintf("max=%.2f%%, ", *usageCpuPercent.Max) - } - usage += fmt.Sprintf("so you only need %.1f vCPUs. %s has %d vCPUs.\n", neededCPU, rightSizedInstanceType.InstanceType, int32(rightSizedInstanceType.VCpu)) - } - - usage += fmt.Sprintf("- %s has %.1fGB Memory. Free Memory over the course of last week is ", currentInstanceType.InstanceType, currentInstanceType.MemoryGb) - if usageFreeMemoryBytes.Min == nil && usageFreeMemoryBytes.Avg == nil && usageFreeMemoryBytes.Max == nil { - usage += "not available." - } else { - if usageFreeMemoryBytes.Min != nil { - usage += fmt.Sprintf("min=%.2fGB, ", *usageFreeMemoryBytes.Min/(1024.0*1024.0*1024.0)) - } - if usageFreeMemoryBytes.Avg != nil { - usage += fmt.Sprintf("avg=%.2fGB, ", *usageFreeMemoryBytes.Avg/(1024.0*1024.0*1024.0)) - } - if usageFreeMemoryBytes.Max != nil { - usage += fmt.Sprintf("max=%.2fGB, ", *usageFreeMemoryBytes.Max/(1024.0*1024.0*1024.0)) - } - usage += fmt.Sprintf("so you only need %.1fGB Memory. %s has %.1fGB Memory.\n", neededMemory, rightSizedInstanceType.InstanceType, rightSizedInstanceType.MemoryGb) - } - - usage += fmt.Sprintf("- %s's network performance is %s. Throughput over the course of last week is ", currentInstanceType.InstanceType, currentInstanceType.NetworkPerformance) - if usageNetworkThroughputBytes.Min == nil && usageNetworkThroughputBytes.Avg == nil && usageNetworkThroughputBytes.Max == nil { - usage += "not available." 
- } else { - if usageNetworkThroughputBytes.Min != nil { - usage += fmt.Sprintf("min=%.2fMB, ", *usageNetworkThroughputBytes.Min/(1024*1024)) - } - if usageNetworkThroughputBytes.Avg != nil { - usage += fmt.Sprintf("avg=%.2fMB, ", *usageNetworkThroughputBytes.Avg/(1024*1024)) - } - if usageNetworkThroughputBytes.Max != nil { - usage += fmt.Sprintf("max=%.2fMB, ", *usageNetworkThroughputBytes.Max/(1024*1024)) - } - usage += fmt.Sprintf("so you only need %.2fMB Throughput. %s has %s Throughput.\n", neededNetworkThroughput/(1024.0*1024.0), rightSizedInstanceType.InstanceType, rightSizedInstanceType.NetworkPerformance) - } - - needs := "" - for k, v := range preferences { - if _, ok := aws_rds.PreferenceInstanceDBKey[k]; !ok { - continue - } - if aws_rds.PreferenceInstanceDBKey[k] == "" { - continue - } - if v == nil { - vl := extractFromRdsInstance(rdsInstance, *currentInstanceType, region, k) - needs += fmt.Sprintf("- You asked %s to be same as the current instance value which is %v\n", k, vl) - } else { - needs += fmt.Sprintf("- You asked %s to be %s\n", k, *v) - } - } - - prompt := fmt.Sprintf(` -I'm giving recommendation on aws rds db instance right sizing. Based on user's usage and needs I have concluded that the best option for them is to use %s instead of %s. I need help summarizing the explanation into 280 characters (it's not a tweet! dont use hashtag!) while keeping these rules: -- mention the requirements from user side. -- for those fields which are changing make sure you mention the change. - -Here's usage data: -%s - -User's needs: -%s -`, rightSizedInstanceType.InstanceType, currentInstanceType.InstanceType, usage, needs) - if excludeBurstable { - prompt += "\nBurstable instances are excluded." - } - resp, err := s.openaiSvc.CreateChatCompletion( - ctx, - openai.ChatCompletionRequest{ - Model: openai.GPT4TurboPreview, - Messages: []openai.ChatCompletionMessage{ - { - Role: openai.ChatMessageRoleUser, - Content: prompt, - }, - }, - }, - ) - - if err != nil { - return "", err - } - - if len(resp.Choices) == 0 { - return "", errors.New("empty choices") - } - return strings.TrimSpace(resp.Choices[0].Message.Content), nil -} - -func (s *Service) generateRdsInstanceStorageDescription(ctx context.Context, rdsInstance entity.AwsRds, region string, - currStorageType string, currStorageSize *int32, currStorageIops *int32, currStorageThroughput *float64, - recStorageType string, recStorageSize *int32, recStorageIops *int32, recStorageThroughput *float64, metrics map[string][]types2.Datapoint, - preferences map[string]*string, neededStorageSize int32, neededStorageIops int32, neededStorageThroughputMB float64, usageAverageType UsageAverageType) (string, error) { - usageFreeStorageBytes := extractUsage(metrics["FreeStorageSpace"], usageAverageType) - usageStorageIops := extractUsage(sumMergeDatapoints(metrics["ReadIOPS"], metrics["WriteIOPS"]), usageAverageType) - usageStorageThroughputBytes := extractUsage(sumMergeDatapoints(metrics["ReadThroughput"], metrics["WriteThroughput"]), usageAverageType) - usageStorageThroughputMB := entity.Usage{ - Avg: funcP(usageStorageThroughputBytes.Avg, usageStorageThroughputBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcP(usageStorageThroughputBytes.Min, usageStorageThroughputBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Max: funcP(usageStorageThroughputBytes.Max, usageStorageThroughputBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }), - } - usageVolumeBytesUsed := 
extractUsage(metrics["VolumeBytesUsed"], usageAverageType) - - var usage string - if strings.Contains(strings.ToLower(rdsInstance.Engine), "aurora") { - if currStorageSize != nil && recStorageSize != nil && *currStorageSize != 0 && *recStorageSize != 0 { - usage += fmt.Sprintf("- %s has %dGB Storage. Usage over the course of last week is ", currStorageType, *currStorageSize) - if usageFreeStorageBytes.Max != nil { - usage += fmt.Sprintf("min=%.2fGB, ", float64(*currStorageSize)-*usageFreeStorageBytes.Max/(1024*1024*1024)) - } - if usageFreeStorageBytes.Avg != nil { - usage += fmt.Sprintf("avg=%.2fGB, ", float64(*currStorageSize)-*usageFreeStorageBytes.Avg/(1024*1024*1024)) - } - if usageFreeStorageBytes.Min != nil { - usage += fmt.Sprintf("max=%.2fGB, ", float64(*currStorageSize)-*usageFreeStorageBytes.Min/(1024*1024*1024)) - } - usage += fmt.Sprintf("so you only need %dGB Storage. %s has %dGB Storage.\n", neededStorageSize, recStorageType, recStorageSize) - } - } else { - if currStorageSize != nil && recStorageSize != nil && *currStorageSize != 0 && *recStorageSize != 0 { - usage += fmt.Sprintf("- %s has %dGB Storage. Usage over the course of last week is ", currStorageType, *currStorageSize) - if usageVolumeBytesUsed.Min != nil { - usage += fmt.Sprintf("min=%.2fGB, ", *usageVolumeBytesUsed.Min/(1024*1024*1024)) - } - if usageVolumeBytesUsed.Avg != nil { - usage += fmt.Sprintf("avg=%.2fGB, ", *usageVolumeBytesUsed.Avg/(1024*1024*1024)) - } - if usageVolumeBytesUsed.Max != nil { - usage += fmt.Sprintf("max=%.2fGB, ", *usageVolumeBytesUsed.Max/(1024*1024*1024)) - } - usage += fmt.Sprintf("so you only need %dGB Storage. %s has %dGB Storage.\n", neededStorageSize, recStorageType, recStorageSize) - } - } - if currStorageIops != nil && recStorageIops != nil && *currStorageIops != 0 && *recStorageIops != 0 { - if getValueOrZero(usageStorageIops.Min) == 0 && getValueOrZero(usageStorageIops.Avg) == 0 && getValueOrZero(usageStorageIops.Max) == 0 { - usage += fmt.Sprintf("- %s has %d IOPS. Usage over the course of last week is ", currStorageType, getValueOrZero(currStorageIops)) - if usageStorageIops.Min != nil { - usage += fmt.Sprintf("min=%.2f, ", *usageStorageIops.Min) - } - if usageStorageIops.Avg != nil { - usage += fmt.Sprintf("avg=%.2f, ", *usageStorageIops.Avg) - } - if usageStorageIops.Max != nil { - usage += fmt.Sprintf("max=%.2f, ", *usageStorageIops.Max) - } - usage += fmt.Sprintf("so you only need %d io/s. %s has %d IOPS.\n", neededStorageIops, recStorageType, recStorageIops) - } else { - usage += fmt.Sprintf("- %s has %d IOPS. Usage data is not available. you need %d io/s. %s has %d IOPS.\n", currStorageType, getValueOrZero(currStorageIops), neededStorageIops, recStorageType, recStorageIops) - } - } - if currStorageThroughput != nil && recStorageThroughput != nil && *currStorageThroughput != 0 && *recStorageThroughput != 0 { - if getValueOrZero(usageStorageThroughputMB.Min) == 0 && getValueOrZero(usageStorageThroughputMB.Avg) == 0 && getValueOrZero(usageStorageThroughputMB.Max) == 0 { - usage += fmt.Sprintf("- %s has %.1fMB Throughput. 
Usage over the course of last week is ", currStorageType, *currStorageThroughput) - if usageStorageThroughputMB.Min != nil { - usage += fmt.Sprintf("min=%.2fMB, ", *usageStorageThroughputMB.Min) - } - if usageStorageThroughputMB.Avg != nil { - usage += fmt.Sprintf("avg=%.2fMB, ", *usageStorageThroughputMB.Avg) - } - if usageStorageThroughputMB.Max != nil { - usage += fmt.Sprintf("max=%.2fMB, ", *usageStorageThroughputMB.Max) - } - usage += fmt.Sprintf("so you only need %.2f MB. %s has %.2fMB Throughput.\n", neededStorageThroughputMB, recStorageType, *recStorageThroughput) - } else { - usage += fmt.Sprintf("- %s has %.1fMB Throughput. Usage data is not available. you only need %.2f MB. %s has %.2fMB Throughput.\n", currStorageType, getValueOrZero(currStorageThroughput), neededStorageThroughputMB, recStorageType, getValueOrZero(recStorageThroughput)) - } - } - - needs := "" - for k, v := range preferences { - if _, ok := aws_rds.PreferenceStorageDBKey[k]; !ok { - continue - } - if aws_rds.PreferenceStorageDBKey[k] == "" { - continue - } - if v == nil { - vl := extractFromRdsInstance(rdsInstance, model.RDSDBInstance{}, region, k) - needs += fmt.Sprintf("- You asked %s to be same as the current instance value which is %v\n", k, vl) - } else { - needs += fmt.Sprintf("- You asked %s to be %s\n", k, *v) - } - } - - prompt := fmt.Sprintf(` -I'm giving recommendation on aws rds db instance storage right sizing. Based on user's usage and needs I have concluded that the best option for them is to use %s instead of %s. I need help summarizing the explanation into 280 characters (it's not a tweet! dont use hashtag!) while keeping these rules: -- mention the requirements from user side. -- for those fields which are changing make sure you mention the change. - -Here's usage data: -%s - -User's needs: -%s -`, recStorageType, currStorageType, usage, needs) - resp, err := s.openaiSvc.CreateChatCompletion( - ctx, - openai.ChatCompletionRequest{ - Model: openai.GPT4TurboPreview, - Messages: []openai.ChatCompletionMessage{ - { - Role: openai.ChatMessageRoleUser, - Content: prompt, - }, - }, - }, - ) - - if err != nil { - return "", err - } - - if len(resp.Choices) == 0 { - return "", errors.New("empty choices") - } - return strings.TrimSpace(resp.Choices[0].Message.Content), nil -} diff --git a/services/wastage/recommendation/gcp_compute_instance.go b/services/wastage/recommendation/gcp_compute_instance.go deleted file mode 100644 index 3ef5dc1bb..000000000 --- a/services/wastage/recommendation/gcp_compute_instance.go +++ /dev/null @@ -1,911 +0,0 @@ -package recommendation - -import ( - "context" - "errors" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/db/model" - "github.com/opengovern/opengovernance/services/wastage/recommendation/preferences/gcp_compute" - gcp "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - "github.com/sashabaranov/go-openai" - "go.uber.org/zap" - "google.golang.org/protobuf/types/known/wrapperspb" - "regexp" - "strconv" - "strings" -) - -func (s *Service) GCPComputeInstanceRecommendation( - ctx context.Context, - instance gcp.GcpComputeInstance, - metrics map[string]*gcp.Metric, - preferences map[string]*wrapperspb.StringValue, -) (*gcp.GcpComputeInstanceRightsizingRecommendation, *model.GCPComputeMachineType, *model.GCPComputeMachineType, error) { - var machine *model.GCPComputeMachineType - var err error - - if instance.MachineType == "" { - return nil, nil, nil, fmt.Errorf("no machine type provided") - } - if strings.Contains(instance.MachineType, 
"custom") { - machine, err = s.extractCustomInstanceDetails(instance) - } else { - machine, err = s.gcpComputeMachineTypeRepo.Get(instance.MachineType) - if err != nil { - return nil, nil, nil, err - } - } - currentCost, currLicenseCost, err := s.costSvc.GetGCPComputeInstanceCost(ctx, instance) - if err != nil { - return nil, nil, nil, err - } - - region := strings.Join([]string{strings.Split(instance.Zone, "-")[0], strings.Split(instance.Zone, "-")[1]}, "-") - - result := gcp.GcpComputeInstanceRightsizingRecommendation{ - Current: &gcp.RightsizingGcpComputeInstance{ - Zone: instance.Zone, - Region: region, - MachineType: instance.MachineType, - MachineFamily: machine.MachineFamily, - Cpu: machine.GuestCpus, - MemoryMb: machine.MemoryMb, - Preemptible: instance.Preemptible, - - Cost: currentCost, - OsLicenseCost: currLicenseCost, - }, - } - - if v, ok := metrics["cpuUtilization"]; !ok || v == nil { - return nil, nil, nil, fmt.Errorf("cpuUtilization metric not found") - } - if v, ok := metrics["memoryUtilization"]; !ok || v == nil { - return nil, nil, nil, fmt.Errorf("memoryUtilization metric not found") - } - cpuUsage := extractGCPUsage(metrics["cpuUtilization"].Data) - memoryUsage := extractGCPUsage(metrics["memoryUtilization"].Data) - - result.Cpu = &cpuUsage - result.Memory = &memoryUsage - - vCPU := machine.GuestCpus - cpuBreathingRoom := int64(0) - if preferences["CPUBreathingRoom"] != nil { - cpuBreathingRoom, _ = strconv.ParseInt(preferences["CPUBreathingRoom"].GetValue(), 10, 64) - } - memoryBreathingRoom := int64(0) - if preferences["MemoryBreathingRoom"] != nil { - memoryBreathingRoom, _ = strconv.ParseInt(preferences["MemoryBreathingRoom"].GetValue(), 10, 64) - } - neededCPU := float64(vCPU) * (PWrapperDouble(cpuUsage.Avg) + (float64(cpuBreathingRoom) / 100.0)) - if neededCPU < 2 { - neededCPU = 2 - } - - neededMemoryMb := 0.0 - if memoryUsage.Avg != nil { - neededMemoryMb = calculateHeadroom(PWrapperDouble(memoryUsage.Avg)/(1024*1024), memoryBreathingRoom) - } - if neededMemoryMb < 1024 { - neededMemoryMb = 1024 - } - - pref := make(map[string]any) - - for k, v := range preferences { - var vl any - if v == nil || v.GetValue() == "" { - vl = extractFromGCPComputeInstance(region, machine, k) - } else { - vl = v.GetValue() - } - if _, ok := gcp_compute.PreferenceInstanceKey[k]; !ok { - continue - } - - cond := "=" - if sc, ok := gcp_compute.PreferenceInstanceSpecialCond[k]; ok { - cond = sc - } - if k == "MemoryGB" { - vl = int64(vl.(float64) * 1024) - } - if k == "MachineFamily" { - if vl == "custom" { - continue - } - } - pref[fmt.Sprintf("%s %s ?", gcp_compute.PreferenceInstanceKey[k], cond)] = vl - } - - var preemptible bool - if preferences["ProvisioningModel"] == nil || preferences["ProvisioningModel"].GetValue() == "" { - if instance.Preemptible { - preemptible = true - } else { - preemptible = false - } - } else { - if preferences["ProvisioningModel"].GetValue() == "Spot" { - preemptible = true - } else { - preemptible = false - } - } - pref["preemptible = ?"] = preemptible - - suggestedMachineType, err := s.gcpComputeMachineTypeRepo.GetCheapestByCoreAndMemory(neededCPU, neededMemoryMb, pref) - if err != nil { - return nil, nil, nil, err - } - - excludeCustom := false - if preferences["ExcludeCustomInstances"] != nil { - if preferences["ExcludeCustomInstances"].GetValue() == "Yes" { - excludeCustom = true - } - } - - if suggestedMachineType != nil { - instance.Zone = suggestedMachineType.Zone - instance.MachineType = suggestedMachineType.Name - instance.Preemptible = 
preemptible - suggestedCost, suggLicenseCost, err := s.costSvc.GetGCPComputeInstanceCost(ctx, instance) - if err != nil { - return nil, nil, nil, err - } - - if !excludeCustom { - customMachines, err := s.checkCustomMachines(ctx, region, int64(neededCPU), int64(neededMemoryMb), preferences) - if err != nil { - return nil, nil, nil, err - } - for _, customMachine := range customMachines { - if customMachine.Cost < suggestedCost { - suggestedMachineType = &customMachine.MachineType - suggestedCost = customMachine.Cost - } - } - } - - result.Recommended = &gcp.RightsizingGcpComputeInstance{ - Zone: suggestedMachineType.Zone, - Region: suggestedMachineType.Region, - MachineType: suggestedMachineType.Name, - MachineFamily: suggestedMachineType.MachineFamily, - Cpu: suggestedMachineType.GuestCpus, - MemoryMb: suggestedMachineType.MemoryMb, - Preemptible: preemptible, - - Cost: suggestedCost, - OsLicenseCost: suggLicenseCost, - } - } else if !excludeCustom { - customMachines, err := s.checkCustomMachines(ctx, region, int64(neededCPU), int64(neededMemoryMb), preferences) - if err != nil { - return nil, nil, nil, err - } - suggestedMachineType = machine - suggestedCost := currentCost - - for _, customMachine := range customMachines { - if customMachine.Cost < suggestedCost { - suggestedMachineType = &customMachine.MachineType - suggestedCost = customMachine.Cost - } - } - - result.Recommended = &gcp.RightsizingGcpComputeInstance{ - Zone: suggestedMachineType.Zone, - Region: suggestedMachineType.Region, - MachineType: suggestedMachineType.Name, - MachineFamily: suggestedMachineType.MachineFamily, - Cpu: suggestedMachineType.GuestCpus, - MemoryMb: suggestedMachineType.MemoryMb, - Preemptible: preemptible, - - Cost: suggestedCost, - } - } - if suggestedMachineType == nil { - suggestedMachineType = machine - } - - description, err := s.generateGcpComputeInstanceDescription(ctx, region, instance, metrics, preferences, neededCPU, - neededMemoryMb, machine, suggestedMachineType) - if err != nil { - s.logger.Error("Failed to generate description", zap.Error(err)) - } else { - result.Description = description - } - - if preferences["ExcludeUpsizingFeature"] != nil { - if preferences["ExcludeUpsizingFeature"].GetValue() == "Yes" { - if result.Recommended != nil && result.Recommended.Cost > result.Current.Cost { - result.Recommended = result.Current - result.Description = "No recommendation available as upsizing feature is disabled" - return &result, machine, machine, nil - } - } - } - - return &result, machine, suggestedMachineType, nil -} - -func (s *Service) GCPComputeDiskRecommendation( - ctx context.Context, - disk gcp.GcpComputeDisk, - currentMachine *model.GCPComputeMachineType, - recommendedMachine *model.GCPComputeMachineType, - metrics gcp.DiskMetrics, - preferences map[string]*wrapperspb.StringValue, -) (*gcp.GcpComputeDiskRecommendation, error) { - currentCost, err := s.costSvc.GetGCPComputeDiskCost(ctx, disk) - if err != nil { - return nil, err - } - - readIopsUsage := extractGCPUsage(metrics.Metrics["DiskReadIOPS"].Data) - writeIopsUsage := extractGCPUsage(metrics.Metrics["DiskWriteIOPS"].Data) - readThroughputUsageBytes := extractGCPUsage(metrics.Metrics["DiskReadThroughput"].Data) - readThroughputUsageMb := gcp.Usage{ - Avg: funcPWrapper(readThroughputUsageBytes.Avg, readThroughputUsageBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcPWrapper(readThroughputUsageBytes.Min, readThroughputUsageBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - 
Max: funcPWrapper(readThroughputUsageBytes.Max, readThroughputUsageBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }),
-    }
-    writeThroughputUsageBytes := extractGCPUsage(metrics.Metrics["DiskWriteThroughput"].Data)
-    writeThroughputUsageMb := gcp.Usage{
-        Avg: funcPWrapper(writeThroughputUsageBytes.Avg, writeThroughputUsageBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }),
-        Min: funcPWrapper(writeThroughputUsageBytes.Min, writeThroughputUsageBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }),
-        Max: funcPWrapper(writeThroughputUsageBytes.Max, writeThroughputUsageBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }),
-    }
-
-    readIopsLimit, writeIopsLimit, readThroughputLimit, writeThroughputLimit, err := s.getMaximums(currentMachine.MachineFamily,
-        currentMachine.MachineType, disk.DiskType, currentMachine.GuestCpus, disk.DiskSize.Value)
-    if err != nil {
-        return nil, err
-    }
-
-    result := gcp.GcpComputeDiskRecommendation{
-        Current: &gcp.RightsizingGcpComputeDisk{
-            DiskType:             disk.DiskType,
-            DiskSize:             disk.DiskSize.Value,
-            ReadIopsLimit:        readIopsLimit,
-            WriteIopsLimit:       writeIopsLimit,
-            ReadThroughputLimit:  readThroughputLimit,
-            WriteThroughputLimit: writeThroughputLimit,
-
-            Zone:   disk.Zone,
-            Region: disk.Region,
-
-            Cost: currentCost,
-        },
-        ReadIops:        &readIopsUsage,
-        WriteIops:       &writeIopsUsage,
-        ReadThroughput:  &readThroughputUsageMb,
-        WriteThroughput: &writeThroughputUsageMb,
-    }
-
-    iopsBreathingRoom := int64(0)
-    if preferences["IOPSBreathingRoom"] != nil {
-        iopsBreathingRoom, _ = strconv.ParseInt(preferences["IOPSBreathingRoom"].GetValue(), 10, 64)
-    }
-
-    throughputBreathingRoom := int64(0)
-    if preferences["ThroughputBreathingRoom"] != nil {
-        throughputBreathingRoom, _ = strconv.ParseInt(preferences["ThroughputBreathingRoom"].GetValue(), 10, 64)
-    }
-
-    neededReadIops := pWrapperCalculateHeadroom(readIopsUsage.Avg, iopsBreathingRoom)
-    neededReadThroughput := pWrapperCalculateHeadroom(readThroughputUsageMb.Avg, throughputBreathingRoom)
-    neededWriteIops := pWrapperCalculateHeadroom(writeIopsUsage.Avg, iopsBreathingRoom)
-    neededWriteThroughput := pWrapperCalculateHeadroom(writeThroughputUsageMb.Avg, throughputBreathingRoom)
-
-    pref := make(map[string]any)
-
-    diskSize := disk.DiskSize.Value
-    if ds, ok := preferences["DiskSizeGb"]; ok {
-        if ds != nil {
-            diskSize, _ = strconv.ParseInt(ds.GetValue(), 10, 64)
-        }
-    }
-
-    suggestions, err := s.findCheapestDiskType(recommendedMachine.MachineFamily, recommendedMachine.MachineType, recommendedMachine.GuestCpus,
-        neededReadIops, neededWriteIops, neededReadThroughput, neededWriteThroughput, diskSize)
-    if err != nil {
-        return nil, err
-    }
-
-    var suggestedType *string
-    var suggestedSize *int64
-
-    if len(suggestions) > 0 {
-        for i := range suggestions {
-            newDisk := gcp.GcpComputeDisk{
-                Id:       disk.Id,
-                Zone:     disk.Zone,
-                Region:   disk.Region,
-                DiskType: suggestions[i].Type,
-                DiskSize: wrapperspb.Int64(suggestions[i].Size),
-            }
-            suggestedCost, err := s.costSvc.GetGCPComputeDiskCost(ctx, newDisk)
-            if err != nil {
-                return nil, err
-            }
-            suggestions[i].Cost = &suggestedCost
-        }
-        s.logger.Info("Disk suggestions", zap.Any("suggestions", suggestions))
-        minPriceSuggestion := suggestions[0]
-        for _, sug := range suggestions {
-            if _, ok := preferences["DiskSizeGb"]; ok {
-                if diskSize != minPriceSuggestion.Size {
-                    continue
-                }
-            }
-            if *sug.Cost < *minPriceSuggestion.Cost {
-                minPriceSuggestion = sug
-            }
-        }
-        suggestedType = &minPriceSuggestion.Type
-        suggestedSize = &minPriceSuggestion.Size
-    }
-
-    if suggestedType == nil && suggestedSize == nil {
-        suggestedType = &disk.DiskType
-        suggestedSize = &disk.DiskSize.Value
-    }
-
-    pref["storage_type = ?"] = suggestedType
-
-    for k, v := range preferences {
-        var vl any
-        if v == nil || v.GetValue() == "" {
-            vl = extractFromGCPComputeDisk(disk, k)
-        } else {
-            vl = v.GetValue()
-        }
-        if _, ok := gcp_compute.PreferenceDiskKey[k]; !ok {
-            continue
-        }
-
-        cond := "="
-
-        pref[fmt.Sprintf("%s %s ?", gcp_compute.PreferenceDiskKey[k], cond)] = vl
-    }
-
-    suggestedStorageType, err := s.gcpComputeDiskTypeRepo.GetCheapest(pref)
-    if err != nil {
-        return nil, err
-    }
-    recommendedReadIopsLimit, recommendedWriteIopsLimit, recommendedReadThroughputLimit, recommendedWriteThroughputLimit, err := s.getMaximums(recommendedMachine.MachineFamily,
-        recommendedMachine.MachineType, suggestedStorageType.StorageType, recommendedMachine.GuestCpus, PWrapperInt64(disk.DiskSize))
-    if err != nil {
-        return nil, err
-    }
-
-    s.logger.Info("calc price debug --- before checking suggestedStorageType", zap.Any("suggestedStorageType", suggestedStorageType))
-    if suggestedStorageType != nil {
-        disk.Zone = suggestedStorageType.Zone
-        disk.DiskType = *suggestedType
-        disk.Region = suggestedStorageType.Region
-        disk.DiskSize = PInt64Wrapper(suggestedSize)
-
-        s.logger.Info("calc price debug --- before calculating cost", zap.Any("disk", disk), zap.Any("suggestedStorageType", suggestedStorageType))
-
-        suggestedCost, err := s.costSvc.GetGCPComputeDiskCost(ctx, disk)
-        if err != nil {
-            return nil, err
-        }
-
-        s.logger.Info("calc price debug --- after calculating cost", zap.Any("suggestedCost", suggestedCost))
-
-        result.Recommended = &gcp.RightsizingGcpComputeDisk{
-            Zone:                 suggestedStorageType.Zone,
-            Region:               suggestedStorageType.Region,
-            DiskType:             suggestedStorageType.StorageType,
-            DiskSize:             disk.DiskSize.Value,
-            ReadIopsLimit:        recommendedReadIopsLimit,
-            WriteIopsLimit:       recommendedWriteIopsLimit,
-            ReadThroughputLimit:  recommendedReadThroughputLimit,
-            WriteThroughputLimit: recommendedWriteThroughputLimit,
-            Cost:                 suggestedCost,
-        }
-
-        s.logger.Info("calc price debug --- after setting result", zap.Any("result recommended", result.Recommended))
-    }
-
-    description, err := s.generateGcpComputeDiskDescription(ctx, disk, currentMachine, recommendedMachine, metrics,
-        preferences, readIopsLimit, writeIopsLimit, readThroughputLimit, writeThroughputLimit, neededReadIops,
-        neededWriteIops, neededReadThroughput, neededWriteThroughput, recommendedReadIopsLimit, recommendedWriteIopsLimit,
-        recommendedReadThroughputLimit, recommendedWriteThroughputLimit, *suggestedType, *suggestedSize)
-    if err != nil {
-        s.logger.Error("Failed to generate description", zap.Error(err))
-    } else {
-        result.Description = description
-    }
-
-    if preferences["ExcludeUpsizingFeature"] != nil {
-        if preferences["ExcludeUpsizingFeature"].GetValue() == "Yes" {
-            if result.Recommended != nil && result.Recommended.Cost > result.Current.Cost {
-                result.Recommended = result.Current
-                result.Description = "No recommendation available as upsizing feature is disabled"
-                return &result, nil
-            }
-        }
-    }
-
-    return &result, nil
-}
-
-func extractFromGCPComputeInstance(region string, machine *model.GCPComputeMachineType, k string) any {
-    switch k {
-    case "Region":
-        return region
-    case "vCPU":
-        return machine.GuestCpus
-    case "MemoryGB":
-        return machine.MemoryMb / 1024
-    case "MachineFamily":
-        return machine.MachineFamily
-    case "MachineType":
-        return machine.MachineType
-    }
-    return ""
-}
-
-func extractFromGCPComputeDisk(disk gcp.GcpComputeDisk, k string) any {
-    switch k {
-    case "Region":
-        return disk.Region
-    case "DiskType":
-        return disk.DiskType
-    }
-    return ""
-}
-
-func (s *Service) extractCustomInstanceDetails(instance gcp.GcpComputeInstance) (*model.GCPComputeMachineType, error) {
-    re := regexp.MustCompile(`(\D.+)-(\d+)-(\d.+)`)
-    machineTypePrefix := re.ReplaceAllString(instance.MachineType, "$1")
-    strCPUAmount := re.ReplaceAllString(instance.MachineType, "$2")
-    strRAMAmount := re.ReplaceAllString(instance.MachineType, "$3")
-
-    region := strings.Join([]string{strings.Split(instance.Zone, "-")[0], strings.Split(instance.Zone, "-")[1]}, "-")
-    cpu, err := strconv.ParseInt(strCPUAmount, 10, 64)
-    if err != nil {
-        return nil, err
-    }
-    memoryMb, err := strconv.ParseInt(strRAMAmount, 10, 64)
-    if err != nil {
-        return nil, err
-    }
-
-    family := "custom"
-    if machineTypePrefix != "custom" {
-        family = strings.Split(machineTypePrefix, "-")[0]
-    }
-
-    if family == "e2" {
-        return nil, fmt.Errorf("e2 instances are not supported")
-    }
-
-    return &model.GCPComputeMachineType{
-        Name:          instance.MachineType,
-        MachineType:   instance.MachineType,
-        MachineFamily: family,
-        GuestCpus:     cpu,
-        MemoryMb:      memoryMb,
-        Zone:          instance.Zone,
-        Region:        region,
-        Description:   "",
-        ImageSpaceGb:  0,
-
-        UnitPrice: 0,
-    }, nil
-}
-
-func (s *Service) checkCustomMachines(ctx context.Context, region string, neededCpu, neededMemoryMb int64, preferences map[string]*wrapperspb.StringValue) ([]CustomOffer, error) {
-    if preferences["MemoryGB"] != nil && preferences["MemoryGB"].GetValue() != "" {
-        neededMemoryGb, _ := strconv.ParseInt(preferences["MemoryGB"].GetValue(), 10, 64)
-        neededMemoryMb = neededMemoryGb * 1024
-    }
-    if preferences["vCPU"] != nil && preferences["vCPU"].GetValue() != "" {
-        neededCpu, _ = strconv.ParseInt(preferences["vCPU"].GetValue(), 10, 64)
-    }
-
-    offers := make([]CustomOffer, 0)
-    if preferences["MachineFamily"] != nil && preferences["MachineFamily"].GetValue() != "" {
-        offer, err := s.checkCustomMachineForFamily(ctx, region, preferences["MachineFamily"].GetValue(), neededCpu, neededMemoryMb, preferences)
-        if err != nil {
-            return nil, err
-        }
-        if offer == nil {
-            return nil, fmt.Errorf("machine family does not have any custom machines")
-        }
-        return offer, nil
-    }
-
-    if neededCpu <= 128 && neededMemoryMb <= 665600 {
-        n2Offer, err := s.checkCustomMachineForFamily(ctx, region, "n2", neededCpu, neededMemoryMb, preferences)
-        if err != nil {
-            return nil, err
-        }
-        offers = append(offers, n2Offer...)
-    }
-    if neededCpu <= 80 && neededMemoryMb <= 665600 {
-        n4Offer, err := s.checkCustomMachineForFamily(ctx, region, "n4", neededCpu, neededMemoryMb, preferences)
-        if err != nil {
-            return nil, err
-        }
-        offers = append(offers, n4Offer...)
-    }
-    if neededCpu <= 224 && neededMemoryMb <= 786432 {
-        n2dOffer, err := s.checkCustomMachineForFamily(ctx, region, "n2d", neededCpu, neededMemoryMb, preferences)
-        if err != nil {
-            return nil, err
-        }
-        offers = append(offers, n2dOffer...)
-    }
-    // TODO: add e2 custom machines
-    g2Offer, err := s.checkCustomMachineForFamily(ctx, region, "g2", neededCpu, neededMemoryMb, preferences)
-    if err != nil {
-        return nil, err
-    }
-    offers = append(offers, g2Offer...)
- - s.logger.Info("custom machines", zap.Any("offers", offers)) - for _, offer := range offers { - s.logger.Info("custom machine info", zap.String("family", offer.Family), zap.Any("machineType", offer.MachineType), zap.Float64("cost", offer.Cost)) - } - - return offers, nil -} - -func (s *Service) checkCustomMachineForFamily(ctx context.Context, region, family string, neededCpu, neededMemoryMb int64, preferences map[string]*wrapperspb.StringValue) ([]CustomOffer, error) { - if neededCpu > 2 { - neededCpu = roundUpToMultipleOf(neededCpu, 4) - } - if family == "n2" || family == "n2d" { - neededMemoryMb = roundUpToMultipleOf(neededMemoryMb, 256) - if neededMemoryMb < neededCpu*512 { - neededMemoryMb = neededCpu * 512 - } - } else if family == "n4" { - neededMemoryMb = roundUpToMultipleOf(neededMemoryMb, 256) - if neededMemoryMb < neededCpu*2048 { - neededMemoryMb = neededCpu * 2048 - } - } else if family == "g2" { - neededMemoryMb = roundUpToMultipleOf(neededMemoryMb, 1024) - if neededMemoryMb < neededCpu*4096 { - neededMemoryMb = neededCpu * 4096 - } - } - - if neededMemoryMb > 8192*neededCpu { - neededCpu = roundUpToMultipleOf(neededMemoryMb, 8192) / 8192 - neededCpu = roundUpToMultipleOf(neededCpu, 4) - } - - pref := make(map[string]any) - for k, v := range preferences { - if k == "Region" { - if v != nil && v.GetValue() != "" { - pref["location = ?"] = *v - } else { - pref["location = ?"] = region - } - } - } - - var customOffers []CustomOffer - cpuSku, err := s.gcpComputeSKURepo.GetCheapestCustomCore(family, pref) - if err != nil { - return nil, err - } - if cpuSku == nil { - return nil, nil - } - memorySku, err := s.gcpComputeSKURepo.GetCheapestCustomRam(family, pref) - if err != nil { - return nil, err - } - if memorySku == nil { - return nil, nil - } - - machineType := fmt.Sprintf("%s-custom-%d-%d", family, neededCpu, neededMemoryMb) - - if memorySku.Location == cpuSku.Location { - cost, _, err := s.costSvc.GetGCPComputeInstanceCost(ctx, gcp.GcpComputeInstance{ - Id: "", - Zone: cpuSku.Location + "-a", - MachineType: machineType, - }) - if err != nil { - return nil, err - } - - return []CustomOffer{{ - Family: family, - MachineType: model.GCPComputeMachineType{ - Name: machineType, - MachineType: machineType, - MachineFamily: family, - GuestCpus: neededCpu, - MemoryMb: neededMemoryMb, - Zone: cpuSku.Location + "-a", - Region: cpuSku.Location, - }, - Cost: cost, - }}, nil - } - - cpuRegionCost, _, err := s.costSvc.GetGCPComputeInstanceCost(ctx, gcp.GcpComputeInstance{ - Id: "", - Zone: cpuSku.Location + "-a", - MachineType: machineType, - }) - if err != nil { - return nil, err - } - - customOffers = append(customOffers, CustomOffer{ - Family: family, - MachineType: model.GCPComputeMachineType{ - Name: machineType, - MachineType: machineType, - GuestCpus: neededCpu, - MemoryMb: neededMemoryMb, - Zone: cpuSku.Location + "-a", - Region: cpuSku.Location, - }, - Cost: cpuRegionCost, - }) - - memoryRegionCost, _, err := s.costSvc.GetGCPComputeInstanceCost(ctx, gcp.GcpComputeInstance{ - Id: "", - Zone: memorySku.Location + "-a", - MachineType: machineType, - }) - if err != nil { - return nil, err - } - - customOffers = append(customOffers, CustomOffer{ - Family: family, - MachineType: model.GCPComputeMachineType{ - Name: machineType, - MachineType: machineType, - GuestCpus: neededCpu, - MemoryMb: neededMemoryMb, - Zone: memorySku.Location + "-a", - Region: memorySku.Location, - }, - Cost: memoryRegionCost, - }) - - return customOffers, nil -} - -type CustomOffer struct { - Family string - 
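checkCustomMachineForFamily, in the hunk above, normalizes a requested shape before pricing it: vCPUs are rounded up to a multiple of 4 once above 2, and memory is rounded up to the family's step size with a per-vCPU floor (for n2: 256 MB steps and at least 512 MB per vCPU). A self-contained sketch of the n2 case, using those constants from the deleted code:

    package main

    import "fmt"

    // roundUpToMultipleOf rounds n up to the next multiple of m, as the helper
    // of the same name in this file does.
    func roundUpToMultipleOf(n, m int64) int64 {
        if n%m == 0 {
            return n
        }
        return ((n / m) + 1) * m
    }

    func main() {
        cpu, memMb := int64(5), int64(9000)
        cpu = roundUpToMultipleOf(cpu, 4)       // custom shapes step by 4 vCPUs -> 8
        memMb = roundUpToMultipleOf(memMb, 256) // n2 memory steps by 256 MB -> 9216
        if memMb < cpu*512 {                    // n2 floor: 512 MB per vCPU
            memMb = cpu * 512
        }
        fmt.Println(cpu, memMb) // 8 9216
    }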
MachineType model.GCPComputeMachineType - Cost float64 -} - -func roundUpToMultipleOf(number, multipleOf int64) int64 { - if number%multipleOf == 0 { - return number - } - return ((number / multipleOf) + 1) * multipleOf -} - -func (s *Service) generateGcpComputeInstanceDescription(ctx context.Context, region string, instance gcp.GcpComputeInstance, - metrics map[string]*gcp.Metric, preferences map[string]*wrapperspb.StringValue, - neededCpu, neededMemoryMb float64, currentMachine *model.GCPComputeMachineType, - suggestedMachineType *model.GCPComputeMachineType) (string, error) { - if v, ok := metrics["cpuUtilization"]; !ok || v == nil { - return "", fmt.Errorf("cpuUtilization metric not found") - } - if v, ok := metrics["memoryUtilization"]; !ok || v == nil { - return "", fmt.Errorf("memoryUtilization metric not found") - } - cpuUsage := extractGCPUsage(metrics["cpuUtilization"].Data) - memoryUsage := extractGCPUsage(metrics["memoryUtilization"].Data) - - var usage string - if len(metrics["cpuUtilization"].Data) > 0 { - usage = fmt.Sprintf("- %s has %d vCPUs. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2f vCPUs. %s has %d vCPUs.\n", instance.MachineType, currentMachine.GuestCpus, PWrapperDouble(cpuUsage.Min), PWrapperDouble(cpuUsage.Avg), PWrapperDouble(cpuUsage.Max), neededCpu, suggestedMachineType.MachineType, suggestedMachineType.GuestCpus) - } else { - usage = fmt.Sprintf("- %s has %d vCPUs. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. %s has %d vCPUs.\n", instance.MachineType, currentMachine.GuestCpus, suggestedMachineType.MachineType, suggestedMachineType.GuestCpus) - - } - if len(metrics["memoryUtilization"].Data) > 0 { - usage += fmt.Sprintf("- %s has %dMb Memory. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2fMb Memory. %s has %dMb Memory.\n", instance.MachineType, currentMachine.MemoryMb, PWrapperDouble(memoryUsage.Min), PWrapperDouble(memoryUsage.Avg), PWrapperDouble(memoryUsage.Max), neededMemoryMb, suggestedMachineType.MachineType, suggestedMachineType.MemoryMb) - } else { - usage += fmt.Sprintf("- %s has %dMb Memory. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. %s has %dMb Memory.\n", instance.MachineType, currentMachine.MemoryMb, suggestedMachineType.MachineType, suggestedMachineType.MemoryMb) - } - - needs := "" - for k, v := range preferences { - if gcp_compute.PreferenceInstanceKey[k] == "" { - continue - } - if v == nil { - vl := extractFromGCPComputeInstance(region, currentMachine, k) - needs += fmt.Sprintf("- You asked %s to be same as the current instance value which is %v\n", k, vl) - } else { - needs += fmt.Sprintf("- You asked %s to be %s\n", k, v.GetValue()) - } - } - - prompt := fmt.Sprintf(` -I'm giving recommendation on GCP Compute Instance right sizing. Based on user's usage and needs I have concluded that the best option for him is to use %s instead of %s. I need help summarizing the explanation into 280 characters (it's not a tweet! dont use hashtag!) while keeping these rules: -- mention the requirements from user side. -- for those fields which are changing make sure you mention the change. 
- -Here's usage data: -%s - -User's needs: -%s -`, suggestedMachineType.MachineType, currentMachine.MachineType, usage, needs) - - resp, err := s.openaiSvc.CreateChatCompletion( - ctx, - openai.ChatCompletionRequest{ - Model: openai.GPT4TurboPreview, - Messages: []openai.ChatCompletionMessage{ - { - Role: openai.ChatMessageRoleUser, - Content: prompt, - }, - }, - }, - ) - - if err != nil { - return "", err - } - - if len(resp.Choices) == 0 { - return "", errors.New("empty choices") - } - - s.logger.Info("GPT results", zap.String("prompt", prompt), zap.String("result", strings.TrimSpace(resp.Choices[0].Message.Content))) - - return strings.TrimSpace(resp.Choices[0].Message.Content), nil -} - -func (s *Service) generateGcpComputeDiskDescription(ctx context.Context, disk gcp.GcpComputeDisk, - currentMachine *model.GCPComputeMachineType, - recommendedMachine *model.GCPComputeMachineType, - metrics gcp.DiskMetrics, preferences map[string]*wrapperspb.StringValue, - readIopsLimit, writeIopsLimit int64, readThroughputLimit, writeThroughputLimit float64, - neededReadIops, neededWriteIops, neededReadThroughput, neededWriteThroughput float64, - recommendedReadIopsLimit, recommendedWriteIopsLimit int64, recommendedReadThroughputLimit, recommendedWriteThroughputLimit float64, - suggestedType string, suggestedSize int64, -) (string, error) { - if v, ok := metrics.Metrics["DiskReadIOPS"]; !ok || v == nil { - return "", fmt.Errorf("DiskReadIOPS metric not found") - } - if v, ok := metrics.Metrics["DiskWriteIOPS"]; !ok || v == nil { - return "", fmt.Errorf("DiskWriteIOPS metric not found") - } - if v, ok := metrics.Metrics["DiskReadThroughput"]; !ok || v == nil { - return "", fmt.Errorf("DiskReadThroughput metric not found") - } - if v, ok := metrics.Metrics["DiskWriteThroughput"]; !ok || v == nil { - return "", fmt.Errorf("DiskWriteThroughput metric not found") - } - readIopsUsage := extractGCPUsage(metrics.Metrics["DiskReadIOPS"].Data) - writeIopsUsage := extractGCPUsage(metrics.Metrics["DiskWriteIOPS"].Data) - readThroughputUsageBytes := extractGCPUsage(metrics.Metrics["DiskReadThroughput"].Data) - readThroughputUsageMb := gcp.Usage{ - Avg: funcPWrapper(readThroughputUsageBytes.Avg, readThroughputUsageBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcPWrapper(readThroughputUsageBytes.Min, readThroughputUsageBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Max: funcPWrapper(readThroughputUsageBytes.Max, readThroughputUsageBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }), - } - writeThroughputUsageBytes := extractGCPUsage(metrics.Metrics["DiskWriteThroughput"].Data) - writeThroughputUsageMb := gcp.Usage{ - Avg: funcPWrapper(writeThroughputUsageBytes.Avg, writeThroughputUsageBytes.Avg, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Min: funcPWrapper(writeThroughputUsageBytes.Min, writeThroughputUsageBytes.Min, func(a, _ float64) float64 { return a / (1024 * 1024) }), - Max: funcPWrapper(writeThroughputUsageBytes.Max, writeThroughputUsageBytes.Max, func(a, _ float64) float64 { return a / (1024 * 1024) }), - } - - var usage string - if len(metrics.Metrics["DiskReadIOPS"].Data) > 0 || len(metrics.Metrics["DiskWriteIOPS"].Data) > 0 { - usage = fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %d Write IOPS estimation. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.1f Write IOPS estimation. 
Disk Type %s with Machine Type %s with size %d has %d Write IOPS estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), writeIopsLimit, PWrapperDouble(writeIopsUsage.Min), PWrapperDouble(writeIopsUsage.Avg), PWrapperDouble(writeIopsUsage.Max), neededWriteIops, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedWriteIopsLimit) - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %d Read IOPS estimation. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.1f Read IOPS estimation. Disk Type %s with Machine Type %s with size %d has %d Read IOPS estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), readIopsLimit, PWrapperDouble(readIopsUsage.Min), PWrapperDouble(readIopsUsage.Avg), PWrapperDouble(readIopsUsage.Max), neededReadIops, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedReadIopsLimit) - } else { - usage = fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %d Write IOPS estimation. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. Disk Type %s with Machine Type %s with size %d has %d IOPS estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), writeIopsLimit, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedWriteIopsLimit) - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %d Write IOPS estimation. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. Disk Type %s with Machine Type %s with size %d has %d IOPS estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), readIopsLimit, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedReadIopsLimit) - } - if len(metrics.Metrics["DiskReadThroughput"].Data) > 0 || len(metrics.Metrics["DiskWriteThroughput"].Data) > 0 { - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %.2f Mb Write Throughput estimation. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2f Mb Write Throughput estimation. Disk Type %s with Machine Type %s with size %d has %.2f Mb Write Throughput estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), writeThroughputLimit, PWrapperDouble(writeThroughputUsageMb.Min), PWrapperDouble(writeThroughputUsageMb.Avg), PWrapperDouble(writeThroughputUsageMb.Max), neededWriteThroughput, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedWriteThroughputLimit) - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %.2f Mb Read Throughput estimation. Usage over the course of last week is min=%.2f%%, avg=%.2f%%, max=%.2f%%, so you only need %.2f Mb Read Throughput estimation. Disk Type %s with Machine Type %s with size %d has %.2f Mb Read Throughput estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), readThroughputLimit, PWrapperDouble(readThroughputUsageMb.Min), PWrapperDouble(readThroughputUsageMb.Avg), PWrapperDouble(readThroughputUsageMb.Max), neededReadThroughput, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedReadThroughputLimit) - } else { - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %.2f Mb Write Throughput estimation. Usage is not available. 
You need to install CloudWatch Agent on your instance to get this data. Disk Type %s with Machine Type %s with size %d has %.2f Mb Write Throughput estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), writeThroughputLimit, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedWriteThroughputLimit) - usage += fmt.Sprintf("- Disk Type %s with Machine Type %s with size %d has %.2f Mb Read Throughput estimation. Usage is not available. You need to install CloudWatch Agent on your instance to get this data. Disk Type %s with Machine Type %s with size %d has %.2f Mb Read Throughput estimation.\n", disk.DiskType, currentMachine.MachineType, PWrapperInt64(disk.DiskSize), readThroughputLimit, suggestedType, recommendedMachine.MachineType, suggestedSize, recommendedReadThroughputLimit) - } - - needs := "" - for k, v := range preferences { - if gcp_compute.PreferenceDiskKey[k] == "" { - continue - } - if v == nil { - vl := extractFromGCPComputeDisk(disk, k) - needs += fmt.Sprintf("- You asked %s to be same as the current instance value which is %v\n", k, vl) - } else { - needs += fmt.Sprintf("- You asked %s to be %s\n", k, v.GetValue()) - } - } - - prompt := fmt.Sprintf(` -I'm giving recommendation on GCP Compute Disk right sizing. Based on user's usage and needs I have concluded that the best option for him is to use %s with size %d instead of %s with size %d. I need help summarizing the explanation into 280 characters (it's not a tweet! dont use hashtag!) while keeping these rules: -- mention the requirements from user side. -- for those fields which are changing make sure you mention the change. - -Here's usage data: -%s - -User's needs: -%s -`, suggestedType, suggestedSize, disk.DiskType, disk.DiskSize.GetValue(), usage, needs) - - resp, err := s.openaiSvc.CreateChatCompletion( - ctx, - openai.ChatCompletionRequest{ - Model: openai.GPT4TurboPreview, - Messages: []openai.ChatCompletionMessage{ - { - Role: openai.ChatMessageRoleUser, - Content: prompt, - }, - }, - }, - ) - - if err != nil { - return "", err - } - - if len(resp.Choices) == 0 { - return "", errors.New("empty choices") - } - - s.logger.Info("GPT results", zap.String("prompt", prompt), zap.String("result", strings.TrimSpace(resp.Choices[0].Message.Content))) - - return strings.TrimSpace(resp.Choices[0].Message.Content), nil -} - -func PWrapperDouble(v *wrapperspb.DoubleValue) float64 { - if v == nil { - return 0 - } - return v.GetValue() -} - -func PWrapperInt64(v *wrapperspb.Int64Value) int64 { - if v == nil { - return 0 - } - return v.GetValue() -} - -func PInt64Wrapper(v *int64) *wrapperspb.Int64Value { - if v == nil { - return nil - } - return &wrapperspb.Int64Value{Value: *v} -} - -func pWrapperCalculateHeadroom(needed *wrapperspb.DoubleValue, percent int64) float64 { - if needed == nil { - return 0.0 - } - v := needed.Value - return v / (1.0 - (float64(percent) / 100.0)) -} diff --git a/services/wastage/recommendation/gcp_limitations.go b/services/wastage/recommendation/gcp_limitations.go deleted file mode 100644 index b4e162d0a..000000000 --- a/services/wastage/recommendation/gcp_limitations.go +++ /dev/null @@ -1,2232 +0,0 @@ -package recommendation - -import ( - "fmt" - "go.uber.org/zap" - "math" - "strconv" - "strings" -) - -type DiskLimitationsPerVm struct { - MaxWriteIOPS float64 `json:"max_write_iops"` - MaxReadIOPS float64 `json:"max_read_iops"` - MaxWriteThroughput float64 `json:"max_write_throughput"` // MiBps - MaxReadThroughput float64 `json:"max_read_throughput"` 
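The per-VM limitation tables that follow work together with the per-GB rates in DiskLimitationsPerGb: the effective limit for a disk is the size-scaled figure (plus a fixed baseline for pd-balanced and pd-ssd) clamped by the per-VM cap, which is what getMaximums computes per disk type. A small sketch of that combination, using the pd-balanced read-IOPS constants quoted in this file (3000 baseline, 6 IOPS per GB):

    package main

    import "fmt"

    // effectiveLimit clamps a size-scaled limit (baseline + sizeGb*perGbRate)
    // by the per-VM cap, the pattern getMaximums applies for each disk type.
    func effectiveLimit(perVMCap, baseline, perGbRate float64, sizeGb int64) float64 {
        scaled := baseline + float64(sizeGb)*perGbRate
        return min(scaled, perVMCap)
    }

    func main() {
        // 500 GB pd-balanced: 3000 + 500*6 = 6000 read IOPS, below the 15000 VM cap.
        fmt.Println(effectiveLimit(15000, 3000, 6, 500)) // 6000
        // 5000 GB: the scaled figure (33000) is cut off at the VM cap.
        fmt.Println(effectiveLimit(15000, 3000, 6, 5000)) // 15000
    }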
// MiBps -} - -type DiskLimitations struct { - WriteIOPS float64 `json:"max_write_iops"` - ReadIOPS float64 `json:"max_read_iops"` - Throughput float64 `json:"max_write_throughput"` // MiBps -} - -type DiskTypeOffer struct { - Type string `json:"type"` - Size int64 `json:"size"` - - Cost *float64 `json:"cost"` -} - -var ( - DiskLimitationsPerGb = map[string]DiskLimitations{ - "pd-standard": { - WriteIOPS: 1.5, - ReadIOPS: 0.75, - Throughput: 0.12, - }, - "pd-balanced": { - WriteIOPS: 6, - ReadIOPS: 6, - Throughput: 0.28, - }, - "pd-ssd": { - WriteIOPS: 30, - ReadIOPS: 30, - Throughput: 0.48, - }, - } -) - -// diskTypes sorted by cost per GB: pd-standard, pd-balanced, pd-extreme, pd-ssd -func (s *Service) findCheapestDiskType(machineFamily, machineType string, vCPUs int64, neededReadIops, neededWriteIops, - neededReadThroughput, neededWriteThroughput float64, sizeGb int64) ([]DiskTypeOffer, error) { - var suggestions []DiskTypeOffer - - limitations := s.findLimitations(machineFamily, machineType, vCPUs) - if len(limitations) == 0 { - s.logger.Error("could not find limitations", zap.String("machineFamily", machineFamily), - zap.String("machineType", machineType), zap.Int64("vCPUs", vCPUs)) - limitations = map[string]DiskLimitationsPerVm{ - "pd-standard": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-balanced": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-ssd": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-extreme": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - } - } - - // pd-standard - standard := s.checkStandard(limitations["pd-standard"], sizeGb, neededReadIops, neededWriteIops, neededReadThroughput, neededWriteThroughput) - if standard != nil { - suggestions = append(suggestions, *standard) - } - - // pd-balanced - balanced := s.checkBalanced(limitations["pd-balanced"], sizeGb, neededReadIops, neededWriteIops, neededReadThroughput, neededWriteThroughput) - if balanced != nil { - suggestions = append(suggestions, *balanced) - } - - // pd-extreme - extreme := limitations["pd-extreme"] - if neededReadIops <= extreme.MaxReadIOPS && neededWriteIops <= extreme.MaxWriteIOPS && - neededReadThroughput <= extreme.MaxReadThroughput && neededWriteThroughput <= extreme.MaxWriteThroughput { - suggestions = append(suggestions, DiskTypeOffer{ - Type: "pd-extreme", - Size: sizeGb, - }) - } - - // pd-ssd - ssd := s.checkSSD(limitations["pd-ssd"], sizeGb, neededReadIops, neededWriteIops, neededReadThroughput, neededWriteThroughput) - if ssd != nil { - suggestions = append(suggestions, *ssd) - } - - return suggestions, nil -} - -// getMaximums maxReadIops, maxWriteIops, maxReadThroughput, maxWriteThroughput -func (s *Service) getMaximums(machineFamily, machineType, diskType string, vCPUs, sizeGb int64) (int64, int64, float64, float64, error) { - limitations := s.findLimitations(machineFamily, machineType, vCPUs) - if len(limitations) == 0 { - s.logger.Error("could not find limitations", zap.String("machineFamily", machineType), - zap.String("machineType", machineType), zap.Int64("vCPUs", vCPUs)) - limitations = map[string]DiskLimitationsPerVm{ - "pd-standard": { - MaxWriteIOPS: 
math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-balanced": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-ssd": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - "pd-extreme": { - MaxWriteIOPS: math.MaxFloat64, - MaxReadIOPS: math.MaxFloat64, - MaxReadThroughput: math.MaxFloat64, - MaxWriteThroughput: math.MaxFloat64, - }, - } - } - - // pd-standard' - if diskType == "pd-standard" { - l := limitations["pd-standard"] - maxReadIops := min(l.MaxReadIOPS, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].Throughput) - return int64(maxReadIops), int64(maxWriteIops), maxReadThroughput, maxWriteThroughput, nil - } - - // pd-balanced - if diskType == "pd-balanced" { - l := limitations["pd-balanced"] - maxReadIops := min(l.MaxReadIOPS, 3000+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, 3000+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, 140+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, 140+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].Throughput) - return int64(maxReadIops), int64(maxWriteIops), maxReadThroughput, maxWriteThroughput, nil - } - - // pd-extreme - if diskType == "pd-extreme" { - l := limitations["pd-extreme"] - return int64(l.MaxReadIOPS), int64(l.MaxWriteIOPS), l.MaxReadThroughput, l.MaxWriteThroughput, nil - } - - // pd-ssd - if diskType == "pd-ssd" { - l := limitations["pd-ssd"] - maxReadIops := min(l.MaxReadIOPS, 6000+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, 6000+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, 240+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, 240+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].Throughput) - return int64(maxReadIops), int64(maxWriteIops), maxReadThroughput, maxWriteThroughput, nil - } - return 0, 0, 0, 0, fmt.Errorf("could not find disk type %s", diskType) -} - -func (s *Service) findLimitations(machineFamily, machineType string, vCPUs int64) map[string]DiskLimitationsPerVm { - limitations := make(map[string]DiskLimitationsPerVm) - if machineFamily == "n2" { - if pdExtreme, ok := machineTypeDiskLimitations[machineFamily][machineType]["pd-extreme"]; ok { - limitations["pd-extreme"] = pdExtreme - } - for k, v := range machineTypeDiskLimitationsPerCPURange[machineFamily] { - r := strings.Split(k, "-") - min, _ := strconv.ParseInt(r[0], 10, 64) - if vCPUs >= min { - if len(r) == 1 { - limitations["pd-balanced"] = v["pd-balanced"] - limitations["pd-ssd"] = v["pd-ssd"] - limitations["pd-standard"] = v["pd-standard"] - break - } else { - max, _ := strconv.ParseInt(r[1], 10, 64) - if vCPUs <= max { - limitations["pd-balanced"] = v["pd-balanced"] - limitations["pd-ssd"] = v["pd-ssd"] - 
limitations["pd-standard"] = v["pd-standard"] - break - } - } - } - } - return limitations - } - if l, ok := machineTypeDiskLimitations[machineFamily][machineType]; ok { - return l - } - if l, ok := machineTypeDiskLimitationsPerCPU[machineFamily]; ok { - for k, v := range l { - if k == vCPUs { - return v - } - } - } - if l, ok := machineTypeDiskLimitationsPerCPURange[machineFamily]; ok { - if machineType == "e2-medium" { - return l[machineType] - } - for k, v := range l { - r := strings.Split(k, "-") - min, _ := strconv.ParseInt(r[0], 10, 64) - if vCPUs >= min { - if len(r) == 1 { - return v - } - max, _ := strconv.ParseInt(r[1], 10, 64) - if vCPUs <= max { - return v - } - } - } - } - return limitations -} - -func (s *Service) checkStandard(l DiskLimitationsPerVm, sizeGb int64, neededReadIops, neededWriteIops, - neededReadThroughput, neededWriteThroughput float64) *DiskTypeOffer { - maxReadIops := min(l.MaxReadIOPS, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, float64(sizeGb)*DiskLimitationsPerGb["pd-standard"].Throughput) - if neededReadIops <= maxReadIops && neededWriteIops <= maxWriteIops && - neededReadThroughput <= maxReadThroughput && neededWriteThroughput <= maxWriteThroughput { - return &DiskTypeOffer{ - Type: "pd-standard", - Size: sizeGb, - } - } - if neededReadIops <= l.MaxReadIOPS && neededWriteIops <= l.MaxWriteIOPS && - neededReadThroughput <= l.MaxReadThroughput && neededWriteThroughput <= l.MaxWriteThroughput { - suggestedSize := max(int64(neededReadIops/DiskLimitationsPerGb["pd-standard"].ReadIOPS+0.5), - int64(neededWriteIops/DiskLimitationsPerGb["pd-standard"].WriteIOPS+0.5), - int64(neededReadThroughput/DiskLimitationsPerGb["pd-standard"].Throughput+0.5), - int64(neededWriteThroughput/DiskLimitationsPerGb["pd-standard"].Throughput+0.5)) - return &DiskTypeOffer{ - Type: "pd-standard", - Size: suggestedSize, - } - } - return nil -} - -func (s *Service) checkBalanced(l DiskLimitationsPerVm, sizeGb int64, neededReadIops, neededWriteIops, - neededReadThroughput, neededWriteThroughput float64) *DiskTypeOffer { - maxReadIops := min(l.MaxReadIOPS, 3000+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, 3000+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, 140+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, 140+float64(sizeGb)*DiskLimitationsPerGb["pd-balanced"].Throughput) - if neededReadIops <= maxReadIops && neededWriteIops <= maxWriteIops && - neededReadThroughput <= maxReadThroughput && neededWriteThroughput <= maxWriteThroughput { - return &DiskTypeOffer{ - Type: "pd-balanced", - Size: sizeGb, - } - } - if neededReadIops <= l.MaxReadIOPS && neededWriteIops <= l.MaxWriteIOPS && - neededReadThroughput <= l.MaxReadThroughput && neededWriteThroughput <= l.MaxWriteThroughput { - suggestedSize := max(3000+int64(neededReadIops/DiskLimitationsPerGb["pd-balanced"].ReadIOPS+0.5), - 3000+int64(neededWriteIops/DiskLimitationsPerGb["pd-balanced"].WriteIOPS+0.5), - 140+int64(neededReadThroughput/DiskLimitationsPerGb["pd-balanced"].Throughput+0.5), - 140+int64(neededWriteThroughput/DiskLimitationsPerGb["pd-balanced"].Throughput+0.5)) - 
return &DiskTypeOffer{ - Type: "pd-balanced", - Size: suggestedSize, - } - } - return nil -} - -func (s *Service) checkSSD(l DiskLimitationsPerVm, sizeGb int64, neededReadIops, neededWriteIops, - neededReadThroughput, neededWriteThroughput float64) *DiskTypeOffer { - maxReadIops := min(l.MaxReadIOPS, 6000+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].ReadIOPS) - maxWriteIops := min(l.MaxWriteIOPS, 6000+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].WriteIOPS) - maxReadThroughput := min(l.MaxReadThroughput, 240+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].Throughput) - maxWriteThroughput := min(l.MaxWriteThroughput, 240+float64(sizeGb)*DiskLimitationsPerGb["pd-ssd"].Throughput) - if neededReadIops <= maxReadIops && neededWriteIops <= maxWriteIops && - neededReadThroughput <= maxReadThroughput && neededWriteThroughput <= maxWriteThroughput { - return &DiskTypeOffer{ - Type: "pd-ssd", - Size: sizeGb, - } - } - if neededReadIops <= l.MaxReadIOPS && neededWriteIops <= l.MaxWriteIOPS && - neededReadThroughput <= l.MaxReadThroughput && neededWriteThroughput <= l.MaxWriteThroughput { - suggestedSize := max(6000+int64(neededReadIops/DiskLimitationsPerGb["pd-ssd"].ReadIOPS+0.5), - 6000+int64(neededWriteIops/DiskLimitationsPerGb["pd-ssd"].WriteIOPS+0.5), - 240+int64(neededReadThroughput/DiskLimitationsPerGb["pd-ssd"].Throughput+0.5), - 240+int64(neededWriteThroughput/DiskLimitationsPerGb["pd-ssd"].Throughput+0.5)) - return &DiskTypeOffer{ - Type: "pd-ssd", - Size: suggestedSize, - } - } - return nil -} - -// MachineTypeDiskLimitations is a map of machine types to disk types to disk limitations. -var ( - machineTypeDiskLimitations = map[string]map[string]map[string]DiskLimitationsPerVm{ - "a3": { - "a3-megagpu-8g": { - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "a3-highgpu-8g": { - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - }, - "a2": { - "a2-highgpu-1g": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "a2-highgpu-2g": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-highgpu-4g": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-highgpu-8g": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - 
MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-megagpu-16g": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-ultragpu-1g": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "a2-ultragpu-2g": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-ultragpu-4g": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "a2-ultragpu-8g": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "g2": { - "g2-standard-4": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "g2-standard-8": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - }, - "g2-standard-12": { - "pd-balanced": { - MaxWriteIOPS: 15000, - - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - }, - "g2-standard-16": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "g2-standard-24": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - 
MaxReadThroughput: 1200, - }, - }, - "g2-standard-32": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "g2-standard-48": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "g2-standard-96": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - }, - "m1": { - "m1-megamem-96": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 90000, - MaxReadIOPS: 90000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 90000, - MaxReadIOPS: 90000, - MaxWriteThroughput: 2200, - MaxReadThroughput: 2200, - }, - }, - "m1-ultramem-40": { - "pd-balanced": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "m1-ultramem-80": { - "pd-balanced": { - MaxWriteIOPS: 70000, - MaxReadIOPS: 70000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 70000, - MaxReadIOPS: 70000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "m1-ultramem-160": { - "pd-balanced": { - MaxWriteIOPS: 70000, - MaxReadIOPS: 70000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 70000, - MaxReadIOPS: 70000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "m2": { - "m2-megamem-416": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "m2-ultramem-208": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - 
MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "m2-ultramem-416": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - "m2-hypermem-416": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - }, - "m3": { - "m3-megamem-64": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 2200, - }, - }, - "m3-megamem-128": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1700, - MaxReadThroughput: 2200, - }, - }, - "m3-ultramem-32": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 2200, - }, - }, - "m3-ultramem-64": { - "pd-balanced": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 40000, - MaxReadIOPS: 40000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 2200, - }, - }, - "m3-ultramem-128": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-extreme": { - MaxWriteIOPS: 80000, - - MaxReadIOPS: 80000, - MaxWriteThroughput: 1700, - MaxReadThroughput: 2200, - }, - }, - }, - "n2": { - "n2-standard-64": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-standard-80": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-standard-96": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - 
"n2-standard-128": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highmem-64": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highmem-80": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highmem-96": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highmem-128": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highcpu-64": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highcpu-80": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - "n2-highcpu-96": { - "pd-extreme": { - MaxWriteIOPS: 120000, - MaxReadIOPS: 120000, - MaxWriteThroughput: 3000, - MaxReadThroughput: 4000, - }, - }, - }, - } - machineTypeDiskLimitationsPerCPU = map[string]map[int64]map[string]DiskLimitationsPerVm{ - "c2": { - 4: { - "pd-balanced": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 4000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 4000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 8: { - "pd-balanced": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 4000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 4000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 16: { - "pd-balanced": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 8000, - MaxWriteThroughput: 480, - MaxReadThroughput: 600, - }, - "pd-ssd": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 8000, - MaxWriteThroughput: 480, - MaxReadThroughput: 600, - }, - "pd-standard": { - MaxWriteIOPS: 4000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 30: { - "pd-balanced": { - MaxWriteIOPS: 8000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 480, - MaxReadThroughput: 600, - }, - "pd-ssd": { - MaxWriteIOPS: 8000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 480, - MaxReadThroughput: 600, - }, - "pd-standard": { - MaxWriteIOPS: 8000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 60: { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 30000, - MaxWriteThroughput: 800, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - }, - "c2d": { - 2: { - "pd-balanced": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-ssd": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-standard": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 4: 
{ - "pd-balanced": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-ssd": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-standard": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 8: { - "pd-balanced": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-ssd": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 4080, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - "pd-standard": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 16: { - "pd-balanced": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 8160, - MaxWriteThroughput: 245, - MaxReadThroughput: 326, - }, - "pd-ssd": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 8160, - MaxWriteThroughput: 245, - MaxReadThroughput: 326, - }, - "pd-standard": { - MaxWriteIOPS: 4590, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 32: { - "pd-balanced": { - - MaxWriteIOPS: 8160, - MaxReadIOPS: 15300, - MaxWriteThroughput: 245, - MaxReadThroughput: 612, - }, - "pd-ssd": { - MaxWriteIOPS: 8160, - MaxReadIOPS: 15300, - MaxWriteThroughput: 245, - MaxReadThroughput: 612, - }, - "pd-standard": { - MaxWriteIOPS: 8160, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 56: { - "pd-balanced": { - MaxWriteIOPS: 8160, - MaxReadIOPS: 15300, - MaxWriteThroughput: 245, - MaxReadThroughput: 612, - }, - "pd-ssd": { - MaxWriteIOPS: 8160, - MaxReadIOPS: 15300, - MaxWriteThroughput: 245, - MaxReadThroughput: 612, - }, - "pd-standard": { - MaxWriteIOPS: 8160, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - 112: { - "pd-balanced": { - MaxWriteIOPS: 15300, - MaxReadIOPS: 30600, - MaxWriteThroughput: 408, - MaxReadThroughput: 1224, - }, - "pd-ssd": { - MaxWriteIOPS: 15300, - MaxReadIOPS: 30600, - MaxWriteThroughput: 408, - MaxReadThroughput: 1224, - }, - "pd-standard": { - MaxWriteIOPS: 15300, - MaxReadIOPS: 3060, - MaxWriteThroughput: 245, - MaxReadThroughput: 245, - }, - }, - }, - "c3d": { - 4: { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 8: { - "pd-balanced": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - }, - 16: { - "pd-balanced": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 30: { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 60: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 90: { - "pd-balanced": { - MaxWriteIOPS: 80000, 
- MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 180: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 2200, - MaxReadThroughput: 2200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 2200, - MaxReadThroughput: 2200, - }, - }, - 360: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 2200, - MaxReadThroughput: 2200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - - MaxReadIOPS: 80000, - MaxWriteThroughput: 2200, - MaxReadThroughput: 2200, - }, - }, - }, - "c3": { - 4: { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 8: { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - 22: { - "pd-balanced": { - - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - }, - 44: { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 88: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 176: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - }, - "z3": { - 88: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - 176: { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - }, - }, - "h3": { - 88: { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - }, - } - machineTypeDiskLimitationsPerCPURange = map[string]map[string]map[string]DiskLimitationsPerVm{ - "e2": { - "e2-medium": { - "pd-balanced": { - MaxWriteIOPS: 10000, - MaxReadIOPS: 12000, - MaxWriteThroughput: 200, - MaxReadThroughput: 200, - }, - "pd-ssd": { - MaxWriteIOPS: 10000, - MaxReadIOPS: 12000, - MaxWriteThroughput: 200, - MaxReadThroughput: 200, - }, - "pd-standard": { - MaxWriteIOPS: 10000, - - MaxReadIOPS: 1000, - MaxWriteThroughput: 200, - MaxReadThroughput: 200, - }, - }, - "2-7": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - 
MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - "pd-balanced": { - MaxWriteIOPS: 20000, - - MaxReadIOPS: 20000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "n1": { - "1-1": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - }, - "2-7": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-63": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "64-": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1000, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1000, 
- MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "n2": { - "2-7": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-63": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "64-": { - - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 100000, - MaxReadIOPS: 100000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "n2d": { - "2-7": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-63": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, 
- MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "t2d": { - "1-1": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - }, - "2-7": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 15000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - "pd-balanced": { - MaxWriteIOPS: 20000, - - MaxReadIOPS: 20000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-60": { - "pd-balanced": { - MaxWriteIOPS: 50000, - MaxReadIOPS: 50000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - "t2a": { - // pd-balance 1 20,000 20,000 204 240 - //2-7 20,000 20,000 240 240 - //8-15 25,000 25,000 800 800 - //16-31 25,000 25,000 1,200 1,200 - //32-47 60,000 60,000 1,200 1,200 - //48 80,000 80,000 1,800 1,800 - // pd-ssd 1 20,000 20,000 204 240 - //2-7 20,000 20,000 240 240 - //8-15 25,000 25,000 800 800 - //16-31 25,000 25,000 1,200 1,200 - //32-47 60,000 60,000 1,200 1,200 - //48 80,000 80,000 1,800 1,800 - // pd-standard 1 15,000 1,000 204 240 - //2-3 15,000 2,400 240 240 - //4-7 15,000 3,000 240 240 - //8-15 15,000 5,000 400 800 - //16 or more 15,000 7,500 400 1,200 - "1-1": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - "pd-ssd": { - - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 204, - - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 1000, - MaxWriteThroughput: 204, - MaxReadThroughput: 240, - }, - }, - "2-3": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 20000, - - MaxReadIOPS: 20000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 2400, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "4-7": { - "pd-balanced": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - "pd-ssd": { - MaxWriteIOPS: 20000, - MaxReadIOPS: 20000, - MaxWriteThroughput: 240, - 
MaxReadThroughput: 240, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 3000, - MaxWriteThroughput: 240, - MaxReadThroughput: 240, - }, - }, - "8-15": { - "pd-balanced": { - MaxWriteIOPS: 25000, - - MaxReadIOPS: 25000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 800, - MaxReadThroughput: 800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 5000, - MaxWriteThroughput: 400, - MaxReadThroughput: 800, - }, - }, - "16-31": { - - "pd-balanced": { - MaxWriteIOPS: 25000, - - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - - "pd-ssd": { - MaxWriteIOPS: 25000, - MaxReadIOPS: 25000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "32-47": { - "pd-balanced": { - - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-ssd": { - MaxWriteIOPS: 60000, - MaxReadIOPS: 60000, - MaxWriteThroughput: 1200, - MaxReadThroughput: 1200, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - "48-48": { - "pd-balanced": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - - MaxWriteThroughput: 1800, - MaxReadThroughput: 1800, - }, - "pd-ssd": { - MaxWriteIOPS: 80000, - MaxReadIOPS: 80000, - MaxWriteThroughput: 1800, - MaxReadThroughput: 1800, - }, - "pd-standard": { - MaxWriteIOPS: 15000, - MaxReadIOPS: 7500, - MaxWriteThroughput: 400, - MaxReadThroughput: 1200, - }, - }, - }, - } -) diff --git a/services/wastage/recommendation/kubernetes_objects.go b/services/wastage/recommendation/kubernetes_objects.go deleted file mode 100644 index b3cad4e64..000000000 --- a/services/wastage/recommendation/kubernetes_objects.go +++ /dev/null @@ -1,671 +0,0 @@ -package recommendation - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/golang/protobuf/ptypes/wrappers" - "github.com/labstack/echo/v4" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - pb "github.com/opengovern/plugin-kubernetes-internal/plugin/proto/src/golang" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/wrapperspb" - "net/http" - "sort" - "strconv" - "strings" -) - -func (s *Service) KubernetesPodRecommendation( - pod pb.KubernetesPod, - metrics map[string]*pb.KubernetesContainerMetrics, - preferences map[string]*wrappers.StringValue, -) (*pb.KubernetesPodRightsizingRecommendation, error) { - var containersRightsizing []*pb.KubernetesContainerRightsizingRecommendation - - for _, container := range pod.Containers { - current := pb.RightsizingKubernetesContainer{ - Name: container.Name, - - MemoryRequest: container.MemoryRequest, - MemoryLimit: container.MemoryLimit, - - CpuRequest: container.CpuRequest, - CpuLimit: container.CpuLimit, - } - - if _, ok := metrics[container.Name]; !ok { - containersRightsizing = append(containersRightsizing, &pb.KubernetesContainerRightsizingRecommendation{ - Name: container.Name, - - Current: ¤t, - - Description: "", - }) - continue - } - - cpuMax := getMetricMax(metrics[container.Name].Cpu) - cpuTrimmedMean := getTrimmedMean(metrics[container.Name].Cpu, 0.1) - memoryMax := 
getMetricMax(metrics[container.Name].Memory) - memoryTrimmedMean := getTrimmedMean(metrics[container.Name].Memory, 0.1) - - if pod.Name == "contour-envoy-kl545" { - s.logger.Info("contour-envoy-kl545 usage1", zap.Any("cpuMax", cpuMax), zap.String("container", container.Name), - zap.Any("cpuTrimmedMean", cpuTrimmedMean), zap.Any("memoryMax", memoryMax), zap.Any("memoryTrimmedMean", memoryTrimmedMean)) - } - - recommended := pb.RightsizingKubernetesContainer{ - Name: container.Name, - - MemoryRequest: memoryTrimmedMean, - MemoryLimit: memoryMax, - - CpuRequest: cpuTrimmedMean, - CpuLimit: cpuMax, - } - - if pod.Name == "contour-envoy-kl545" { - s.logger.Info("contour-envoy-kl545 recommended1", zap.String("container", container.Name), - zap.Any("CpuLimit", recommended.CpuLimit), zap.Any("CpuRequest", recommended.CpuRequest), zap.Any("MemoryLimit", recommended.MemoryLimit), zap.Any("MemoryRequest", recommended.MemoryRequest)) - } - - if v, ok := preferences["CPURequestBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - s.logger.Error("invalid CPURequestBreathingRoom value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid CPURequestBreathingRoom value: %s", v.Value)) - } - recommended.CpuRequest = calculateHeadroom(recommended.CpuRequest, vPercent) - if recommended.CpuRequest < 0.1 { - recommended.CpuRequest = 0.1 - } - } - if v, ok := preferences["CPULimitBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - s.logger.Error("invalid CPULimitBreathingRoom value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid CpuLimitBreathingRoom value: %s", v.Value)) - } - recommended.CpuLimit = calculateHeadroom(recommended.CpuLimit, vPercent) - if recommended.CpuLimit < 0.1 { - recommended.CpuLimit = 0.1 - } - } - - if v, ok := preferences["MemoryRequestBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - s.logger.Error("invalid MemoryRequestBreathingRoom value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid MemoryRequestBreathingRoom value: %s", v.Value)) - } - recommended.MemoryRequest = calculateHeadroom(recommended.MemoryRequest, vPercent) - if recommended.MemoryRequest == 0 { - recommended.MemoryRequest = 100 * (1024 * 1024) - } - } - if v, ok := preferences["MemoryLimitBreathingRoom"]; ok && v != nil { - vPercent, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - s.logger.Error("invalid MemoryLimitBreathingRoom value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid MemoryLimitBreathingRoom value: %s", v.Value)) - } - recommended.MemoryLimit = calculateHeadroom(recommended.MemoryLimit, vPercent) - if recommended.MemoryLimit == 0 { - recommended.MemoryLimit = 100 * (1024 * 1024) - } - } - if v, ok := preferences["MinCpuRequest"]; ok && v != nil { - minCpuRequest, err := strconv.ParseFloat(v.Value, 64) - if err != nil { - s.logger.Error("invalid MinCpuRequest value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid MinCpuRequest value: %s", v.Value)) - } - if recommended.CpuRequest < minCpuRequest { - recommended.CpuRequest = minCpuRequest - } - } - if v, ok := preferences["MinMemoryRequest"]; ok && v != nil { - minMemoryRequest, err := 
strconv.ParseFloat(v.Value, 64) - if err != nil { - s.logger.Error("invalid MinMemoryRequest value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid MinMemoryRequest value: %s", v.Value)) - } - if recommended.MemoryRequest < minMemoryRequest { - recommended.MemoryRequest = minMemoryRequest * (1024 * 1024) - } - } - if v, ok := preferences["LeaveCPULimitEmpty"]; ok && v != nil { - leaveCPULimitEmpty, err := strconv.ParseBool(v.Value) - if err != nil { - s.logger.Error("invalid LeaveCPULimitEmpty value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid LeaveCPULimitEmpty value: %s", v.Value)) - } - - if leaveCPULimitEmpty { - recommended.CpuRequest = recommended.CpuLimit - recommended.CpuLimit = current.CpuLimit - } - } - if v, ok := preferences["EqualMemoryRequestLimit"]; ok && v != nil { - equalMemoryRequestLimit, err := strconv.ParseBool(v.Value) - if err != nil { - s.logger.Error("invalid EqualMemoryRequestLimit value", zap.String("value", v.Value)) - return nil, echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid EqualMemoryRequestLimit value: %s", v.Value)) - } - if equalMemoryRequestLimit { - recommended.MemoryRequest = recommended.MemoryLimit - } - } - - if pod.Name == "contour-envoy-kl545" { - s.logger.Info("contour-envoy-kl545 recommended2", zap.String("container", container.Name), - zap.Any("CpuLimit", recommended.CpuLimit), zap.Any("CpuRequest", recommended.CpuRequest), zap.Any("MemoryLimit", recommended.MemoryLimit), zap.Any("MemoryRequest", recommended.MemoryRequest)) - } - - var usageMemoryTrimmedMean, usageMemoryMax, usageCpuTrimmedMean, usageCpuMax *wrappers.DoubleValue - if len(metrics[container.Name].Cpu) > 0 { - usageCpuTrimmedMean = wrapperspb.Double(cpuTrimmedMean) - usageCpuMax = wrapperspb.Double(cpuMax) - } - if len(metrics[container.Name].Memory) > 0 { - usageMemoryTrimmedMean = wrapperspb.Double(memoryTrimmedMean) - usageMemoryMax = wrapperspb.Double(memoryMax) - } - - containersRightsizing = append(containersRightsizing, &pb.KubernetesContainerRightsizingRecommendation{ - Name: container.Name, - - Current: ¤t, - Recommended: &recommended, - - MemoryTrimmedMean: usageMemoryTrimmedMean, - MemoryMax: usageMemoryMax, - CpuTrimmedMean: usageCpuTrimmedMean, - CpuMax: usageCpuMax, - - Description: "", - }) - } - - return &pb.KubernetesPodRightsizingRecommendation{ - Name: pod.Name, - - ContainerResizing: containersRightsizing, - }, nil -} - -func (s *Service) KubernetesDeploymentRecommendation( - deployment pb.KubernetesDeployment, - metrics map[string]*pb.KubernetesPodMetrics, - preferences map[string]*wrappers.StringValue, -) (*pb.KubernetesDeploymentRightsizingRecommendation, error) { - result := pb.KubernetesDeploymentRightsizingRecommendation{ - Name: deployment.Name, - ContainerResizing: nil, - PodContainerResizing: make(map[string]*pb.KubernetesPodRightsizingRecommendation), - } - - overallMetrics := make(map[string]*pb.KubernetesContainerMetrics) - for podName, podMetrics := range metrics { - for containerName, containerMetrics := range podMetrics.Metrics { - containerMetrics := containerMetrics - overallMetrics[containerName] = mergeContainerMetrics(overallMetrics[containerName], containerMetrics, func(aa, bb float64) float64 { - return max(aa, bb) - }) - } - - podContainerResizing, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: podName, - Name: podName, - Containers: deployment.Containers, - }, podMetrics.Metrics, 
preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.PodContainerResizing[podName] = podContainerResizing - } - - containerResizings, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: deployment.Name, - Name: deployment.Name, - Containers: deployment.Containers, - }, overallMetrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.ContainerResizing = containerResizings.ContainerResizing - for _, containerResizing := range result.ContainerResizing { - containerResizing := containerResizing - for podName, podContainerResizings := range result.PodContainerResizing { - podContainerResizings := podContainerResizings - for i, podContainerResizing := range podContainerResizings.ContainerResizing { - podContainerResizing := podContainerResizing - if podContainerResizing == nil || podContainerResizing.Name != containerResizing.Name { - continue - } - podContainerResizing.Current = containerResizing.Current - podContainerResizing.Recommended = containerResizing.Recommended - podContainerResizing.Description = containerResizing.Description - podContainerResizings.ContainerResizing[i] = podContainerResizing - } - result.PodContainerResizing[podName] = podContainerResizings - } - } - - return &result, nil -} - -func (s *Service) KubernetesStatefulsetRecommendation( - statefulset pb.KubernetesStatefulset, - metrics map[string]*pb.KubernetesPodMetrics, - preferences map[string]*wrappers.StringValue, -) (*pb.KubernetesStatefulsetRightsizingRecommendation, error) { - result := pb.KubernetesStatefulsetRightsizingRecommendation{ - Name: statefulset.Name, - ContainerResizing: nil, - PodContainerResizing: make(map[string]*pb.KubernetesPodRightsizingRecommendation), - } - - overallMetrics := make(map[string]*pb.KubernetesContainerMetrics) - for podName, podMetrics := range metrics { - for containerName, containerMetrics := range podMetrics.Metrics { - containerMetrics := containerMetrics - overallMetrics[containerName] = mergeContainerMetrics(overallMetrics[containerName], containerMetrics, func(aa, bb float64) float64 { - return max(aa, bb) - }) - } - - podContainerResizing, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: podName, - Name: podName, - Containers: statefulset.Containers, - }, podMetrics.Metrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.PodContainerResizing[podName] = podContainerResizing - } - - containerResizings, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: statefulset.Name, - Name: statefulset.Name, - Containers: statefulset.Containers, - }, overallMetrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.ContainerResizing = containerResizings.ContainerResizing - for _, containerResizing := range result.ContainerResizing { - containerResizing := containerResizing - for podName, podContainerResizings := range result.PodContainerResizing { - podContainerResizings := podContainerResizings - for i, podContainerResizing := range podContainerResizings.ContainerResizing { - podContainerResizing := podContainerResizing - if podContainerResizing == nil || podContainerResizing.Name != containerResizing.Name { - continue - } - podContainerResizing.Current = containerResizing.Current - 
podContainerResizing.Recommended = containerResizing.Recommended - podContainerResizing.Description = containerResizing.Description - podContainerResizings.ContainerResizing[i] = podContainerResizing - } - result.PodContainerResizing[podName] = podContainerResizings - } - } - - return &result, nil -} - -func (s *Service) KubernetesDaemonsetRecommendation( - daemonset pb.KubernetesDaemonset, - metrics map[string]*pb.KubernetesPodMetrics, - preferences map[string]*wrappers.StringValue, -) (*pb.KubernetesDaemonsetRightsizingRecommendation, error) { - result := pb.KubernetesDaemonsetRightsizingRecommendation{ - Name: daemonset.Name, - ContainerResizing: nil, - PodContainerResizing: make(map[string]*pb.KubernetesPodRightsizingRecommendation), - } - - overallMetrics := make(map[string]*pb.KubernetesContainerMetrics) - for podName, podMetrics := range metrics { - for containerName, containerMetrics := range podMetrics.Metrics { - containerMetrics := containerMetrics - overallMetrics[containerName] = mergeContainerMetrics(overallMetrics[containerName], containerMetrics, func(aa, bb float64) float64 { - return max(aa, bb) - }) - } - - podContainerResizing, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: podName, - Name: podName, - Containers: daemonset.Containers, - }, podMetrics.Metrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.PodContainerResizing[podName] = podContainerResizing - } - - containerResizings, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: daemonset.Name, - Name: daemonset.Name, - Containers: daemonset.Containers, - }, overallMetrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.ContainerResizing = containerResizings.ContainerResizing - for _, containerResizing := range result.ContainerResizing { - containerResizing := containerResizing - for podName, podContainerResizings := range result.PodContainerResizing { - podContainerResizings := podContainerResizings - for i, podContainerResizing := range podContainerResizings.ContainerResizing { - podContainerResizing := podContainerResizing - if podContainerResizing == nil || podContainerResizing.Name != containerResizing.Name { - continue - } - podContainerResizing.Current = containerResizing.Current - podContainerResizing.Recommended = containerResizing.Recommended - podContainerResizing.Description = containerResizing.Description - podContainerResizings.ContainerResizing[i] = podContainerResizing - } - result.PodContainerResizing[podName] = podContainerResizings - } - } - - return &result, nil -} - -func (s *Service) KubernetesJobRecommendation( - job pb.KubernetesJob, - metrics map[string]*pb.KubernetesPodMetrics, - preferences map[string]*wrappers.StringValue, -) (*pb.KubernetesJobRightsizingRecommendation, error) { - result := pb.KubernetesJobRightsizingRecommendation{ - Name: job.Name, - ContainerResizing: nil, - PodContainerResizing: make(map[string]*pb.KubernetesPodRightsizingRecommendation), - } - - overallMetrics := make(map[string]*pb.KubernetesContainerMetrics) - for podName, podMetrics := range metrics { - for containerName, containerMetrics := range podMetrics.Metrics { - containerMetrics := containerMetrics - overallMetrics[containerName] = mergeContainerMetrics(overallMetrics[containerName], containerMetrics, func(aa, bb float64) float64 { - return max(aa, bb) - }) - } - - podContainerResizing, err := 
s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: podName, - Name: podName, - Containers: job.Containers, - }, podMetrics.Metrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.PodContainerResizing[podName] = podContainerResizing - } - - containerResizings, err := s.KubernetesPodRecommendation(pb.KubernetesPod{ - Id: job.Name, - Name: job.Name, - Containers: job.Containers, - }, overallMetrics, preferences) - if err != nil { - s.logger.Error("failed to get kubernetes pod recommendation", zap.Error(err)) - return nil, err - } - result.ContainerResizing = containerResizings.ContainerResizing - for _, containerResizing := range result.ContainerResizing { - containerResizing := containerResizing - for podName, podContainerResizings := range result.PodContainerResizing { - podContainerResizings := podContainerResizings - for i, podContainerResizing := range podContainerResizings.ContainerResizing { - podContainerResizing := podContainerResizing - if podContainerResizing == nil || podContainerResizing.Name != containerResizing.Name { - continue - } - podContainerResizing.Current = containerResizing.Current - podContainerResizing.Recommended = containerResizing.Recommended - podContainerResizing.Description = containerResizing.Description - podContainerResizings.ContainerResizing[i] = podContainerResizing - } - result.PodContainerResizing[podName] = podContainerResizings - } - } - - return &result, nil -} - -func getValueFromLabelList(labels map[string]string, keys []string) (string, bool) { - for _, key := range keys { - if v, ok := labels[key]; ok { - return v, true - } - } - return "", false -} - -func (s *Service) calculateEksNodeCost(ctx context.Context, node pb.KubernetesNode) (float64, error) { - instanceType, ok := getValueFromLabelList(node.Labels, []string{"node.kubernetes.io/instance-type", "beta.kubernetes.io/instance-type"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the instance type for the node") - } - instanceRegion, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/region", "failure-domain.beta.kubernetes.io/region"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the region for the node") - } - instanceAvailabilityZone, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/zone", "failure-domain.beta.kubernetes.io/zone"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the availability zone for the node") - } - instanceOs, ok := getValueFromLabelList(node.Labels, []string{"kubernetes.io/os", "beta.kubernetes.io/os"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the operating system for the node") - } - capacityType, ok := node.Labels["eks.amazonaws.com/capacityType"] - if !ok { - capacityType = "ON_DEMAND" // or throw an error? 
- } - - instance := entity.EC2Instance{ - HashedInstanceId: node.Id, - State: types.InstanceStateNameRunning, - InstanceType: types.InstanceType(instanceType), - Platform: "", - UsageOperation: "", - InstanceLifecycle: types.InstanceLifecycleTypeScheduled, - Placement: &entity.EC2Placement{ - Tenancy: "default", - AvailabilityZone: instanceAvailabilityZone, - }, - } - if capacityType == "SPOT" { - instance.InstanceLifecycle = types.InstanceLifecycleTypeSpot - } - switch instanceOs { - case "linux": - instance.Platform = "Linux/UNIX" - instance.UsageOperation = "RunInstances" - case "windows": - instance.Platform = "Windows" - instance.UsageOperation = "RunInstances:0002" - default: - return 0, status.Errorf(codes.InvalidArgument, "Unsupported operating system for the node: %s", instanceOs) - } - - cost, _, err := s.costSvc.GetEC2InstanceCost(ctx, instanceRegion, instance, nil, nil) - if err != nil { - s.logger.Error("failed to get ec2 instance cost", zap.Error(err)) - return 0, err - } - - return cost, nil -} - -func (s *Service) calculateGKENodeCost(ctx context.Context, node pb.KubernetesNode) (float64, error) { - instanceType, ok := getValueFromLabelList(node.Labels, []string{"node.kubernetes.io/instance-type", "beta.kubernetes.io/instance-type"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the instance type for the node") - } - //instanceRegion, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/region", "failure-domain.beta.kubernetes.io/region"}) - //if !ok { - // return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the region for the node") - //} - instanceZone, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/zone", "failure-domain.beta.kubernetes.io/zone", "topology.gke.io/zone"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the availability zone for the node") - } - gkeProvisioning, ok := node.Labels["cloud.google.com/gke-provisioning"] - if !ok { - gkeProvisioning = "standard" - } - - instance := gcp.GcpComputeInstance{ - Id: node.Id, - Zone: instanceZone, - MachineType: instanceType, - } - if gkeProvisioning == "spot" { - instance.Preemptible = true - } - - cost, _, err := s.costSvc.GetGCPComputeInstanceCost(ctx, instance) - if err != nil { - s.logger.Error("failed to get gcp compute instance cost", zap.Error(err)) - return 0, err - } - - return cost, nil -} - -func (s *Service) calculateAKSNodeCost(ctx context.Context, node pb.KubernetesNode) (float64, error) { - instanceType, ok := getValueFromLabelList(node.Labels, []string{"node.kubernetes.io/instance-type", "beta.kubernetes.io/instance-type"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the instance type for the node") - } - instanceRegion, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/region", "failure-domain.beta.kubernetes.io/region"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the region for the node") - } - instanceZone, ok := getValueFromLabelList(node.Labels, []string{"topology.kubernetes.io/zone", "failure-domain.beta.kubernetes.io/zone"}) - if !ok { - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the availability zone for the node") - } - - instance := entity.AzureVM{ - Id: node.Id, - Zone: instanceZone, - Region: instanceRegion, - InstanceType: instanceType, - } - - cost, err := s.costSvc.GetAzureComputeVMCost(ctx, instance) - if err != nil { - s.logger.Error("failed to get 
gcp compute instance cost", zap.Error(err)) - return 0, err - } - - return cost, nil -} - -func (s *Service) KubernetesNodeCost(ctx context.Context, node pb.KubernetesNode) (float64, error) { - for labelKey, _ := range node.Labels { - labelKey := strings.ToLower(labelKey) - switch { - case strings.HasPrefix(labelKey, "eks.amazonaws.com/"): - return s.calculateEksNodeCost(ctx, node) - case strings.HasPrefix(labelKey, "kubernetes.azure.com/"): - return s.calculateAKSNodeCost(ctx, node) - case strings.HasPrefix(labelKey, "cloud.google.com/"): - return s.calculateGKENodeCost(ctx, node) - } - } - return 0, status.Errorf(codes.InvalidArgument, "Cannot determine the cloud provider for the node") -} - -func mergeContainerMetrics(a *pb.KubernetesContainerMetrics, b *pb.KubernetesContainerMetrics, mergeF func(aa, bb float64) float64) *pb.KubernetesContainerMetrics { - if a == nil { - return b - } - if b == nil { - return a - } - - result := &pb.KubernetesContainerMetrics{ - Cpu: make(map[string]float64), - Memory: make(map[string]float64), - } - - for k, v := range a.Cpu { - result.Cpu[k] = v - } - for k, v := range b.Cpu { - if _, ok := result.Cpu[k]; ok { - result.Cpu[k] = mergeF(result.Cpu[k], v) - } else { - result.Cpu[k] = v - } - } - - for k, v := range a.Memory { - result.Memory[k] = v - } - for k, v := range b.Memory { - if _, ok := result.Memory[k]; ok { - result.Memory[k] = mergeF(result.Memory[k], v) - } else { - result.Memory[k] = v - } - } - - return result -} - -func getMetricMax(data map[string]float64) float64 { - if len(data) == 0 { - return 0 - } - dMax := float64(0) - for _, v := range data { - if v > dMax { - dMax = v - } - } - return dMax -} - -func getTrimmedMean(data map[string]float64, trimPercentage float64) float64 { - if len(data) == 0 { - return 0 - } - - values := make([]float64, 0, len(data)) - for _, v := range data { - values = append(values, v) - } - - sort.Float64s(values) - - numToTrim := int(trimPercentage * float64(len(data)) / 2) - trimmedValues := values[numToTrim : len(values)-numToTrim] - - var sum float64 - for _, v := range trimmedValues { - sum += v - } - return float64(sum) / float64(len(trimmedValues)) -} diff --git a/services/wastage/recommendation/kubernetes_objects_test.go b/services/wastage/recommendation/kubernetes_objects_test.go deleted file mode 100644 index fbdb4e9b5..000000000 --- a/services/wastage/recommendation/kubernetes_objects_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package recommendation - -import ( - "context" - "fmt" - "github.com/opengovern/opengovernance/services/wastage/cost" - pb "github.com/opengovern/plugin-kubernetes-internal/plugin/proto/src/golang" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "testing" -) - -func TestKubernetesNodeCost(t *testing.T) { - logger, _ := zap.NewDevelopment() - s := &Service{ - costSvc: cost.New("http://localhost:8080"), - logger: logger, - } - f, err := s.KubernetesNodeCost(context.Background(), pb.KubernetesNode{ - Id: "aks-main0pool-42603833-vmss000000", - Name: "aks-main0pool-42603833-vmss000000", - Annotations: map[string]string{ - "csi.volume.kubernetes.io/nodeid": `{"disk.csi.azure.com":"aks-main0pool-42603833-vmss000000","file.csi.azure.com":"aks-main0pool-42603833-vmss000000"}`, - "node.alpha.kubernetes.io/ttl": "0", - "volumes.kubernetes.io/controller-managed-attach-detach": "true", - }, - Labels: map[string]string{ - "agentpool": "main0pool", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/instance-type": "Standard_D8s_v5", - "beta.kubernetes.io/os": "linux", - 
"failure-domain.beta.kubernetes.io/region": "eastus2", - "failure-domain.beta.kubernetes.io/zone": "eastus2-2", - "kubernetes.azure.com/agentpool": "main0pool", - "kubernetes.azure.com/cluster": "MC_opengovernance-development_opengovernance-aks_eastus2", - "kubernetes.azure.com/consolidated-additional-properties": "2b9f482d-14b1-11ef-aa6c-e6cb19606ff6", - "kubernetes.azure.com/kubelet-identity-client-id": "044e3f42-e1bd-430a-92bb-6820098dbe50", - "kubernetes.azure.com/mode": "system", - "kubernetes.azure.com/network-policy": "azure", - "kubernetes.azure.com/node-image-version": "AKSUbuntu-2204gen2containerd-202405.03.0", - "kubernetes.azure.com/nodepool-type": "VirtualMachineScaleSets", - "kubernetes.azure.com/os-sku": "Ubuntu", - "kubernetes.azure.com/role": "agent", - "kubernetes.azure.com/storageprofile": "managed", - "kubernetes.azure.com/storagetier": "Premium_LRS", - "kubernetes.io/arch": "amd64", - "kubernetes.io/hostname": "aks-main0pool-42603833-vmss000000", - "kubernetes.io/os": "linux", - "kubernetes.io/role": "agent", - "node-role.kubernetes.io/agent": "", - "node.kubernetes.io/instance-type": "Standard_D8s_v5", - "storageprofile": "managed", - "storagetier": "Premium_LRS", - "topology.disk.csi.azure.com/zone": "eastus2-2", - "topology.kubernetes.io/region": "eastus2", - "topology.kubernetes.io/zone": "eastus2-2", - }, - Capacity: map[string]int64{ - "cpu": 8, - "ephemeral-storage": 50620216000, - "hugepages-1Gi": 0, - "hugepages-2Mi": 0, - "memory": 32863040000, - "pods": 100, - }, - NodeSystemInfo: nil, - Taints: nil, - MaxPodCount: 0, - }) - assert.NoError(t, err) - fmt.Println(f) -} diff --git a/services/wastage/recommendation/preferences/aws_rds/preferences.go b/services/wastage/recommendation/preferences/aws_rds/preferences.go deleted file mode 100644 index ff4d055eb..000000000 --- a/services/wastage/recommendation/preferences/aws_rds/preferences.go +++ /dev/null @@ -1,25 +0,0 @@ -package aws_rds - -var ( - PreferenceInstanceDBKey = map[string]string{ - "Region": "region_code", - "vCPU": "v_cpu", - "MemoryGB": "memory_gb", - "InstanceType": "instance_type", - "Engine": "database_engine", - "ClusterType": "deployment_option", - "InstanceFamily": "instance_family", - "LicenseModel": "license_model", - } - - PreferenceInstanceSpecialCond = map[string]string{ - "vCPU": ">=", - "MemoryGB": ">=", - } - - PreferenceStorageDBKey = map[string]string{ - "StorageType": "volume_type", - } - - PreferenceStorageSpecialCond = map[string]string{} -) diff --git a/services/wastage/recommendation/preferences/ec2instance/preferences.go b/services/wastage/recommendation/preferences/ec2instance/preferences.go deleted file mode 100644 index f7eb2e7e2..000000000 --- a/services/wastage/recommendation/preferences/ec2instance/preferences.go +++ /dev/null @@ -1,53 +0,0 @@ -package ec2instance - -var ( - PreferenceDBKey = map[string]string{ - "Tenancy": "tenancy", - "EBSOptimized": "ebs_optimized", - "OperatingSystem": "operating_system_family", - "LicenseModel": "license_model", - "Region": "region_code", - "Hypervisor": "", - "CurrentGeneration": "current_generation", - "PhysicalProcessor": "physical_processor", - "ClockSpeed": "clock_speed", - "ProcessorArchitecture": "physical_processor_arch", - "SupportedArchitectures": "", - "ENASupport": "enhanced_networking_supported", - "EncryptionInTransitSupported": "", - "SupportedRootDeviceTypes": "", - "Cores": "", - "Threads": "", - "vCPU": "v_cpu", - "MemoryGB": "memory_gb", - "InstanceFamily": "instance_family", - "UsageOperation": "operation", - } - 
- PreferenceSpecialCond = map[string]string{ - "vCPU": ">=", - "MemoryGB": ">=", - } - - UsageOperationHumanToMachine = map[string]string{ - "Linux/UNIX": "RunInstances", - "Red Hat BYOL Linux": "RunInstances:00g0", - "Red Hat Enterprise Linux": "RunInstances:0010", - "Red Hat Enterprise Linux with HA": "RunInstances:1010", - "Red Hat Enterprise Linux with SQL Server Standard and HA": "RunInstances:1014", - "Red Hat Enterprise Linux with SQL Server Enterprise and HA": "RunInstances:1110", - "Red Hat Enterprise Linux with SQL Server Standard": "RunInstances:0014", - "Red Hat Enterprise Linux with SQL Server Web": "RunInstances:0210", - "Red Hat Enterprise Linux with SQL Server Enterprise": "RunInstances:0110", - "SQL Server Enterprise": "RunInstances:0100", - "SQL Server Standard": "RunInstances:0004", - "SQL Server Web": "RunInstances:0200", - "SUSE Linux": "RunInstances:000g", - "Ubuntu Pro": "RunInstances:0g00", - "Windows": "RunInstances:0002", - "Windows BYOL": "RunInstances:0800", - "Windows with SQL Server Enterprise": "RunInstances:0102", - "Windows with SQL Server Standard": "RunInstances:0006", - "Windows with SQL Server Web": "RunInstances:0202", - } -) diff --git a/services/wastage/recommendation/preferences/gcp_compute/preferences.go b/services/wastage/recommendation/preferences/gcp_compute/preferences.go deleted file mode 100644 index 1ebdc7fc8..000000000 --- a/services/wastage/recommendation/preferences/gcp_compute/preferences.go +++ /dev/null @@ -1,20 +0,0 @@ -package gcp_compute - -var ( - PreferenceInstanceKey = map[string]string{ - "Region": "region", - "vCPU": "guest_cpus", - "MemoryGB": "memory_mb", - "MachineFamily": "machine_family", - } - - PreferenceInstanceSpecialCond = map[string]string{ - "vCPU": ">=", - "MemoryGB": ">=", - } - - PreferenceDiskKey = map[string]string{ - "Region": "region", - "DiskType": "storage_type", - } -) diff --git a/services/wastage/recommendation/service.go b/services/wastage/recommendation/service.go deleted file mode 100644 index 888e7ba35..000000000 --- a/services/wastage/recommendation/service.go +++ /dev/null @@ -1,37 +0,0 @@ -package recommendation - -import ( - "github.com/opengovern/opengovernance/services/wastage/cost" - "github.com/opengovern/opengovernance/services/wastage/db/repo" - "github.com/sashabaranov/go-openai" - "go.uber.org/zap" -) - -type Service struct { - logger *zap.Logger - - ec2InstanceRepo repo.EC2InstanceTypeRepo - ebsVolumeRepo repo.EBSVolumeTypeRepo - awsRDSDBInstanceRepo repo.RDSDBInstanceRepo - awsRDSDBStorageRepo repo.RDSDBStorageRepo - gcpComputeMachineTypeRepo repo.GCPComputeMachineTypeRepo - gcpComputeDiskTypeRepo repo.GCPComputeDiskTypeRepo - gcpComputeSKURepo repo.GCPComputeSKURepo - openaiSvc *openai.Client - costSvc *cost.Service -} - -func New(logger *zap.Logger, ec2InstanceRepo repo.EC2InstanceTypeRepo, ebsVolumeRepo repo.EBSVolumeTypeRepo, awsRDSDBInstanceRepo repo.RDSDBInstanceRepo, awsRDSDBStorageRepo repo.RDSDBStorageRepo, gcpComputeMachineTypeRepo repo.GCPComputeMachineTypeRepo, gcpComputeDiskTypeRepo repo.GCPComputeDiskTypeRepo, gcpComputeSKURepo repo.GCPComputeSKURepo, token string, costSvc *cost.Service) *Service { - return &Service{ - logger: logger, - ec2InstanceRepo: ec2InstanceRepo, - ebsVolumeRepo: ebsVolumeRepo, - awsRDSDBInstanceRepo: awsRDSDBInstanceRepo, - awsRDSDBStorageRepo: awsRDSDBStorageRepo, - gcpComputeMachineTypeRepo: gcpComputeMachineTypeRepo, - gcpComputeDiskTypeRepo: gcpComputeDiskTypeRepo, - gcpComputeSKURepo: gcpComputeSKURepo, - openaiSvc: openai.NewClient(token), - 
costSvc: costSvc, - } -} diff --git a/services/wastage/recommendation/service_test.go b/services/wastage/recommendation/service_test.go deleted file mode 100644 index 95de45f9c..000000000 --- a/services/wastage/recommendation/service_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package recommendation - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - types2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/stretchr/testify/assert" - "testing" - "time" -) - -func TestMaxNull(t *testing.T) { - volumeMetrics := map[string]map[string][]types2.Datapoint{} - - n := time.Now() - volumeMetrics["vol1"] = map[string][]types2.Datapoint{ - "VolumeReadOps": []types2.Datapoint{ - { - Average: aws.Float64(0), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n), - Unit: "", - }, - { - Average: aws.Float64(0), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(1 * time.Hour)), - Unit: "", - }, - { - Average: aws.Float64(1), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(2 * time.Hour)), - Unit: "", - }, - }, - "VolumeWriteOps": []types2.Datapoint{ - { - Average: aws.Float64(2), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n), - Unit: "", - }, - { - Average: aws.Float64(2), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(1 * time.Hour)), - Unit: "", - }, - { - Average: aws.Float64(3), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(2 * time.Hour)), - Unit: "", - }, - }, - } - - var ebsIopsDatapoints []types2.Datapoint - for _, v := range volumeMetrics { - ebsIopsDatapoints = mergeDatapoints(sumMergeDatapoints(v["VolumeReadOps"], v["VolumeWriteOps"]), ebsIopsDatapoints) - } - - usage := extractUsage(ebsIopsDatapoints, UsageAverageTypeMax) - assert.Nil(t, usage.Max) - assert.Equal(t, 4.0, *usage.Avg) -} - -func TestMaxNotNull(t *testing.T) { - volumeMetrics := map[string]map[string][]types2.Datapoint{} - - n := time.Now() - volumeMetrics["vol1"] = map[string][]types2.Datapoint{ - "VolumeReadOps": []types2.Datapoint{ - { - Average: aws.Float64(0), - ExtendedStatistics: nil, - Maximum: aws.Float64(1), - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n), - Unit: "", - }, - { - Average: aws.Float64(0), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(1 * time.Hour)), - Unit: "", - }, - { - Average: aws.Float64(1), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(2 * time.Hour)), - Unit: "", - }, - }, - "VolumeWriteOps": []types2.Datapoint{ - { - Average: aws.Float64(2), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n), - Unit: "", - }, - { - Average: aws.Float64(2), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(1 * time.Hour)), - Unit: "", - }, - { - Average: aws.Float64(3), - ExtendedStatistics: nil, - Maximum: nil, - Minimum: nil, - SampleCount: nil, - Sum: nil, - Timestamp: aws.Time(n.Add(2 * time.Hour)), - Unit: "", - }, - }, - } - - var ebsIopsDatapoints []types2.Datapoint - for _, v := 
range volumeMetrics { - ebsIopsDatapoints = mergeDatapoints(sumMergeDatapoints(v["VolumeReadOps"], v["VolumeWriteOps"]), ebsIopsDatapoints) - } - - usage := extractUsage(ebsIopsDatapoints, UsageAverageTypeMax) - assert.Equal(t, 1.0, *usage.Max) - assert.Equal(t, 4.0, *usage.Avg) -} diff --git a/services/wastage/recommendation/utils.go b/services/wastage/recommendation/utils.go deleted file mode 100644 index 00e40c93c..000000000 --- a/services/wastage/recommendation/utils.go +++ /dev/null @@ -1,393 +0,0 @@ -package recommendation - -import ( - "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/opengovern/opengovernance/services/wastage/api/entity" - aws "github.com/opengovern/plugin-aws/plugin/proto/src/golang" - gcp "github.com/opengovern/plugin-gcp/plugin/proto/src/golang/gcp" - "google.golang.org/protobuf/types/known/wrapperspb" - "math" - "sort" -) - -func funcP(a, b *float64, f func(aa, bb float64) float64) *float64 { - if a == nil && b == nil { - return nil - } else if a == nil { - return b - } else if b == nil { - return a - } else { - tmp := f(*a, *b) - return &tmp - } -} - -func funcPWrapper(a, b *wrapperspb.DoubleValue, f func(aa, bb float64) float64) *wrapperspb.DoubleValue { - if a == nil && b == nil { - return nil - } else if a == nil { - return b - } else if b == nil { - return a - } else { - tmp := f(a.GetValue(), b.GetValue()) - return wrapperspb.Double(tmp) - } -} - -func getValueOrZero[T float64 | float32 | int | int64 | int32](v *T) T { - if v == nil { - return T(0) - } - return *v -} - -func mergeDatapoints(in []types.Datapoint, out []types.Datapoint) []types.Datapoint { - avg := func(aa, bb float64) float64 { - return (aa + bb) / 2.0 - } - sum := func(aa, bb float64) float64 { - return aa + bb - } - - dps := map[int64]*types.Datapoint{} - for _, dp := range in { - dp := dp - dps[dp.Timestamp.Unix()] = &dp - } - for _, dp := range out { - dp := dp - if dps[dp.Timestamp.Unix()] == nil { - dps[dp.Timestamp.Unix()] = &dp - continue - } - - dps[dp.Timestamp.Unix()].Average = funcP(dps[dp.Timestamp.Unix()].Average, dp.Average, avg) - dps[dp.Timestamp.Unix()].Maximum = funcP(dps[dp.Timestamp.Unix()].Maximum, dp.Maximum, math.Max) - dps[dp.Timestamp.Unix()].Minimum = funcP(dps[dp.Timestamp.Unix()].Minimum, dp.Minimum, math.Min) - dps[dp.Timestamp.Unix()].SampleCount = funcP(dps[dp.Timestamp.Unix()].SampleCount, dp.SampleCount, sum) - dps[dp.Timestamp.Unix()].Sum = funcP(dps[dp.Timestamp.Unix()].Sum, dp.Sum, sum) - } - - var dpArr []types.Datapoint - for _, dp := range dps { - dpArr = append(dpArr, *dp) - } - sort.Slice(dpArr, func(i, j int) bool { - return dpArr[i].Timestamp.Unix() < dpArr[j].Timestamp.Unix() - }) - return dpArr -} - -func sumMergeDatapoints(in []types.Datapoint, out []types.Datapoint) []types.Datapoint { - sum := func(aa, bb float64) float64 { - return aa + bb - } - - dps := map[int64]*types.Datapoint{} - for _, dp := range in { - dp := dp - dps[dp.Timestamp.Unix()] = &dp - } - for _, dp := range out { - dp := dp - if dps[dp.Timestamp.Unix()] == nil { - dps[dp.Timestamp.Unix()] = &dp - continue - } - - dps[dp.Timestamp.Unix()].Average = funcP(dps[dp.Timestamp.Unix()].Average, dp.Average, sum) - dps[dp.Timestamp.Unix()].Maximum = funcP(dps[dp.Timestamp.Unix()].Maximum, dp.Maximum, sum) - dps[dp.Timestamp.Unix()].Minimum = funcP(dps[dp.Timestamp.Unix()].Minimum, dp.Minimum, sum) - dps[dp.Timestamp.Unix()].SampleCount = funcP(dps[dp.Timestamp.Unix()].SampleCount, dp.SampleCount, sum) - dps[dp.Timestamp.Unix()].Sum = 
funcP(dps[dp.Timestamp.Unix()].Sum, dp.Sum, sum) - } - - var dpArr []types.Datapoint - for _, dp := range dps { - dpArr = append(dpArr, *dp) - } - sort.Slice(dpArr, func(i, j int) bool { - return dpArr[i].Timestamp.Unix() < dpArr[j].Timestamp.Unix() - }) - return dpArr - -} - -func MergeDatapoints(in []types.Datapoint, out []types.Datapoint, mergeF func(aa, bb float64) float64) []types.Datapoint { - dps := map[int64]*types.Datapoint{} - for _, dp := range in { - dp := dp - dps[dp.Timestamp.Unix()] = &dp - } - for _, dp := range out { - dp := dp - if dps[dp.Timestamp.Unix()] == nil { - dps[dp.Timestamp.Unix()] = &dp - continue - } - - dps[dp.Timestamp.Unix()].Average = funcP(dps[dp.Timestamp.Unix()].Average, dp.Average, mergeF) - dps[dp.Timestamp.Unix()].Maximum = funcP(dps[dp.Timestamp.Unix()].Maximum, dp.Maximum, mergeF) - dps[dp.Timestamp.Unix()].Minimum = funcP(dps[dp.Timestamp.Unix()].Minimum, dp.Minimum, mergeF) - dps[dp.Timestamp.Unix()].SampleCount = funcP(dps[dp.Timestamp.Unix()].SampleCount, dp.SampleCount, mergeF) - dps[dp.Timestamp.Unix()].Sum = funcP(dps[dp.Timestamp.Unix()].Sum, dp.Sum, mergeF) - } - - var dpArr []types.Datapoint - for _, dp := range dps { - dpArr = append(dpArr, *dp) - } - sort.Slice(dpArr, func(i, j int) bool { - return dpArr[i].Timestamp.Unix() < dpArr[j].Timestamp.Unix() - }) - return dpArr - -} - -func MergeGrpcDatapoints(in []*aws.Datapoint, out []*aws.Datapoint, mergeF func(aa, bb float64) float64) []*aws.Datapoint { - dps := map[int64]*aws.Datapoint{} - for _, dp := range in { - dp := dp - dps[dp.Timestamp.AsTime().Unix()] = dp - } - for _, dp := range out { - dp := dp - if dps[dp.Timestamp.AsTime().Unix()] == nil { - dps[dp.Timestamp.AsTime().Unix()] = dp - continue - } - - dps[dp.Timestamp.AsTime().Unix()].Average = Float64ToWrapper(funcP(WrappedToFloat64(dps[dp.Timestamp.AsTime().Unix()].Average), WrappedToFloat64(dp.Average), mergeF)) - dps[dp.Timestamp.AsTime().Unix()].Maximum = Float64ToWrapper(funcP(WrappedToFloat64(dps[dp.Timestamp.AsTime().Unix()].Maximum), WrappedToFloat64(dp.Maximum), mergeF)) - dps[dp.Timestamp.AsTime().Unix()].Minimum = Float64ToWrapper(funcP(WrappedToFloat64(dps[dp.Timestamp.AsTime().Unix()].Minimum), WrappedToFloat64(dp.Minimum), mergeF)) - dps[dp.Timestamp.AsTime().Unix()].SampleCount = Float64ToWrapper(funcP(WrappedToFloat64(dps[dp.Timestamp.AsTime().Unix()].SampleCount), WrappedToFloat64(dp.SampleCount), mergeF)) - dps[dp.Timestamp.AsTime().Unix()].Sum = Float64ToWrapper(funcP(WrappedToFloat64(dps[dp.Timestamp.AsTime().Unix()].Sum), WrappedToFloat64(dp.Sum), mergeF)) - } - - var dpArr []*aws.Datapoint - for _, dp := range dps { - dpArr = append(dpArr, dp) - } - sort.Slice(dpArr, func(i, j int) bool { - return dpArr[i].Timestamp.AsTime().Unix() < dpArr[j].Timestamp.AsTime().Unix() - }) - return dpArr - -} - -func averageOfDatapoints(datapoints []types.Datapoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - avg := float64(0) - for _, dp := range datapoints { - dp := dp - if dp.Average == nil { - continue - } - hasNonNil = true - avg += *dp.Average - } - if !hasNonNil { - return nil - } - avg = avg / float64(len(datapoints)) - return &avg -} - -func maxOfAverageOfDatapoints(datapoints []types.Datapoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - maxOfAvgs := float64(0) - for _, dp := range datapoints { - dp := dp - if dp.Average == nil { - continue - } - hasNonNil = true - maxOfAvgs = max(maxOfAvgs, *dp.Average) - } - if !hasNonNil { - return nil - 
} - return &maxOfAvgs -} - -func minOfAverageOfDatapoints(datapoints []types.Datapoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - minOfAverages := float64(0) - for _, dp := range datapoints { - dp := dp - if dp.Average == nil { - continue - } - if !hasNonNil { - minOfAverages = *dp.Average - } - hasNonNil = true - minOfAverages = min(minOfAverages, *dp.Average) - } - if !hasNonNil { - return nil - } - return &minOfAverages -} - -func minOfDatapoints(datapoints []types.Datapoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - minV := math.MaxFloat64 - for _, dp := range datapoints { - dp := dp - if dp.Minimum == nil { - continue - } - hasNonNil = true - minV = min(minV, *dp.Minimum) - } - if !hasNonNil { - return nil - } - return &minV -} - -func maxOfDatapoints(datapoints []types.Datapoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - maxV := 0.0 - for _, dp := range datapoints { - dp := dp - if dp.Maximum == nil { - continue - } - hasNonNil = true - maxV = max(maxV, *dp.Maximum) - } - if !hasNonNil { - return nil - } - return &maxV -} - -type UsageAverageType int - -const ( - UsageAverageTypeAverage UsageAverageType = iota - UsageAverageTypeMax -) - -func extractUsage(dps []types.Datapoint, avgType UsageAverageType) entity.Usage { - var minV, avgV, maxV *float64 - switch avgType { - case UsageAverageTypeAverage: - minV, avgV, maxV = minOfAverageOfDatapoints(dps), averageOfDatapoints(dps), maxOfAverageOfDatapoints(dps) - case UsageAverageTypeMax: - minV, avgV, maxV = minOfAverageOfDatapoints(dps), maxOfAverageOfDatapoints(dps), maxOfDatapoints(dps) - } - - var lastDP *types.Datapoint - if len(dps) > 0 { - lastDP = &dps[len(dps)-1] - } - - return entity.Usage{ - Avg: avgV, - Min: minV, - Max: maxV, - Last: lastDP, - } -} - -func averageOfGCPDatapoints(datapoints []*gcp.DataPoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - avg := float64(0) - for _, dp := range datapoints { - hasNonNil = true - avg += dp.Value - } - if !hasNonNil { - return nil - } - avg = avg / float64(len(datapoints)) - return &avg -} - -func maxOfGCPDatapoints(datapoints []*gcp.DataPoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - maxOfAvgs := float64(0) - for _, dp := range datapoints { - hasNonNil = true - maxOfAvgs = max(maxOfAvgs, dp.Value) - } - if !hasNonNil { - return nil - } - return &maxOfAvgs -} - -func minOfGCPDatapoints(datapoints []*gcp.DataPoint) *float64 { - if len(datapoints) == 0 { - return nil - } - - hasNonNil := false - minOfAverages := float64(0) - for _, dp := range datapoints { - if !hasNonNil { - minOfAverages = dp.Value - } - hasNonNil = true - minOfAverages = min(minOfAverages, dp.Value) - } - if !hasNonNil { - return nil - } - return &minOfAverages -} - -func extractGCPUsage(ts []*gcp.DataPoint) gcp.Usage { - var minV, avgV, maxV *float64 - var minW, avgW, maxW *wrapperspb.DoubleValue - minV, avgV, maxV = minOfGCPDatapoints(ts), averageOfGCPDatapoints(ts), maxOfGCPDatapoints(ts) - - if minV != nil { - minW = wrapperspb.Double(*minV) - } - if avgV != nil { - avgW = wrapperspb.Double(*avgV) - } - if maxV != nil { - maxW = wrapperspb.Double(*maxV) - } - - return gcp.Usage{ - Avg: avgW, - Min: minW, - Max: maxW, - } -}

From 47ce91828173d3ccaf6cc4d66a80c56e6e65297e Mon Sep 17 00:00:00 2001
From: Mohamad Choupan
Date: Fri, 15 Nov 2024 23:53:33 +0330
Subject: [PATCH 11/11] feat: remove pennywise database

---
 assets/postgres-backup/init.sh | 9 ---------
 go.mod                         | 2 +-
 go.sum                         | 2 --
 3 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/assets/postgres-backup/init.sh b/assets/postgres-backup/init.sh
index c1ed6a6f5..b541035a7 100755
--- a/assets/postgres-backup/init.sh
+++ b/assets/postgres-backup/init.sh
@@ -4,8 +4,6 @@ set -e
 dt=$(date '+%d/%m/%Y %H:%M:%S');
 echo "$dt - Running init script the 1st time Primary PostgreSql container is created...";
 
-pennywiseDatabaseName="pennywise"
-pennywiseUserName="pennywise_service"
 
 workspaceDatabaseName="workspace"
 workspaceUserName="workspace_service"
@@ -81,13 +79,6 @@ GRANT ALL PRIVILEGES ON DATABASE "$subscriptionDatabaseName" to $subscriptionUse
 CREATE EXTENSION "uuid-ossp" WITH SCHEMA public;
 GRANT ALL ON SCHEMA public TO $subscriptionUserName;
 
-CREATE DATABASE $pennywiseDatabaseName;
-CREATE USER $pennywiseUserName WITH PASSWORD '$POSTGRES_PENNYWISE_DB_PASSWORD';
-GRANT ALL PRIVILEGES ON DATABASE "$pennywiseDatabaseName" to $pennywiseUserName;
-
-\c "$pennywiseDatabaseName"
-CREATE EXTENSION "uuid-ossp" WITH SCHEMA public;
-GRANT ALL ON SCHEMA public TO $pennywiseUserName;
 
 CREATE DATABASE $informationDatabaseName;
 CREATE USER $informationUserName WITH PASSWORD '$POSTGRES_INFORMATION_DB_PASSWORD';
diff --git a/go.mod b/go.mod
index b5626f984..39132b455 100644
--- a/go.mod
+++ b/go.mod
@@ -35,7 +35,7 @@ require (
 	github.com/haoel/downsampling v0.0.0-20221012062717-1132fe8afe24
 	github.com/hashicorp/vault/api v1.14.0
 	github.com/jackc/pgtype v1.14.0
-	github.com/kaytu-io/pennywise v1.7.16
+
 	github.com/labstack/echo/v4 v4.12.0
 	github.com/labstack/gommon v0.4.2
 	github.com/lib/pq v1.10.9
diff --git a/go.sum b/go.sum
index 882a5bc5b..9739c563d 100644
--- a/go.sum
+++ b/go.sum
@@ -831,8 +831,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/kaytu-io/pennywise v1.7.16 h1:ppZ80MNFuch4GRpEYzYZgxms4QkLCZWJPAnkxa1THgo=
-github.com/kaytu-io/pennywise v1.7.16/go.mod h1:Egr3Z/zzT/K93sOG+QVSgrgQbkcnK+odwjnl1OQUjpQ=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
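
Note on the rightsizing logic removed above (kubernetes_objects.go): container requests are derived from a trimmed mean of the usage samples and limits from the observed maximum, each padded by a configurable breathing-room percentage. A minimal, self-contained Go sketch of that calculation follows; getTrimmedMean and getMetricMax mirror the deleted helpers, while calculateHeadroom is an assumed stand-in for a helper defined outside these patches, and the sample values are hypothetical.

package main

import (
	"fmt"
	"sort"
)

// getTrimmedMean mirrors the deleted helper: sort the samples, drop
// trimPercentage/2 from each tail, and average the rest, so a single
// usage spike cannot inflate the recommended request.
func getTrimmedMean(data map[string]float64, trimPercentage float64) float64 {
	if len(data) == 0 {
		return 0
	}
	values := make([]float64, 0, len(data))
	for _, v := range data {
		values = append(values, v)
	}
	sort.Float64s(values)
	numToTrim := int(trimPercentage * float64(len(values)) / 2)
	trimmed := values[numToTrim : len(values)-numToTrim]
	var sum float64
	for _, v := range trimmed {
		sum += v
	}
	return sum / float64(len(trimmed))
}

// getMetricMax mirrors the deleted helper: the largest observed sample.
func getMetricMax(data map[string]float64) float64 {
	if len(data) == 0 {
		return 0
	}
	dMax := 0.0
	for _, v := range data {
		if v > dMax {
			dMax = v
		}
	}
	return dMax
}

// calculateHeadroom is an assumed stand-in for the helper the deleted code
// calls but that lives outside these patches: pad a value by a percentage
// of breathing room.
func calculateHeadroom(value float64, percent int64) float64 {
	return value * (100 + float64(percent)) / 100
}

func main() {
	// Hypothetical per-timestamp CPU samples (cores) with one spike.
	cpu := map[string]float64{"t0": 0.21, "t1": 0.24, "t2": 0.22, "t3": 0.95}
	request := calculateHeadroom(getTrimmedMean(cpu, 0.1), 10) // trimmed mean + 10%
	limit := calculateHeadroom(getMetricMax(cpu), 10)          // observed max + 10%
	fmt.Printf("request=%.2f limit=%.2f cores\n", request, limit)
}

The same shape applies to memory, with the floors seen above (0.1 cores, 100 MiB) clamping the result when usage data is sparse.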
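Note on node cost attribution: the deleted KubernetesNodeCost routes a node to the EKS, AKS, or GKE pricing path purely from well-known node label prefixes. A sketch of that dispatch, with the actual cost calls left out:

package main

import (
	"fmt"
	"strings"
)

// detectProvider reproduces the dispatch from the deleted KubernetesNodeCost:
// managed-Kubernetes distributions stamp their nodes with vendor label
// prefixes, so the first recognised prefix picks the pricing backend.
func detectProvider(labels map[string]string) (string, error) {
	for labelKey := range labels {
		labelKey = strings.ToLower(labelKey)
		switch {
		case strings.HasPrefix(labelKey, "eks.amazonaws.com/"):
			return "eks", nil
		case strings.HasPrefix(labelKey, "kubernetes.azure.com/"):
			return "aks", nil
		case strings.HasPrefix(labelKey, "cloud.google.com/"):
			return "gke", nil
		}
	}
	return "", fmt.Errorf("cannot determine the cloud provider for the node")
}

func main() {
	// Labels trimmed from the deleted AKS test fixture.
	labels := map[string]string{
		"kubernetes.azure.com/agentpool":   "main0pool",
		"node.kubernetes.io/instance-type": "Standard_D8s_v5",
		"topology.kubernetes.io/region":    "eastus2",
	}
	provider, err := detectProvider(labels)
	fmt.Println(provider, err) // aks <nil>
}

Instance type, region, and zone are then read from the standard topology labels (with the legacy beta.kubernetes.io and failure-domain fallbacks), exactly as the per-provider calculate*NodeCost functions above did.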
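Note on the deleted utils.go: CloudWatch datapoint series are merged per timestamp, combining each statistic through a nil-safe helper (funcP). A condensed sketch, using a pared-down Datapoint type in place of the AWS SDK's:

package main

import (
	"fmt"
	"math"
	"time"
)

// Datapoint is a pared-down stand-in for the CloudWatch SDK type used by the
// deleted utils.go.
type Datapoint struct {
	Timestamp time.Time
	Average   *float64
	Maximum   *float64
}

// funcP follows the deleted helper: apply f only when both operands exist,
// otherwise keep whichever side is non-nil.
func funcP(a, b *float64, f func(aa, bb float64) float64) *float64 {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	tmp := f(*a, *b)
	return &tmp
}

// mergeByTimestamp folds two series into one, averaging Average and taking
// the max of Maximum per timestamp, as the deleted mergeDatapoints did.
func mergeByTimestamp(in, out []Datapoint) map[int64]*Datapoint {
	avg := func(aa, bb float64) float64 { return (aa + bb) / 2 }
	dps := map[int64]*Datapoint{}
	for _, dp := range append(append([]Datapoint{}, in...), out...) {
		dp := dp
		cur, ok := dps[dp.Timestamp.Unix()]
		if !ok {
			dps[dp.Timestamp.Unix()] = &dp
			continue
		}
		cur.Average = funcP(cur.Average, dp.Average, avg)
		cur.Maximum = funcP(cur.Maximum, dp.Maximum, math.Max)
	}
	return dps
}

func main() {
	n := time.Now()
	one, two := 1.0, 2.0
	a := []Datapoint{{Timestamp: n, Average: &one}}
	b := []Datapoint{{Timestamp: n, Average: &two, Maximum: &two}}
	for ts, dp := range mergeByTimestamp(a, b) {
		fmt.Println(ts, *dp.Average, *dp.Maximum) // <unix> 1.5 2
	}
}

This nil-safety is what the TestMaxNull case above exercises: when every Maximum is nil, the merged usage keeps Max as nil instead of inventing a zero.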