From da13967b62c3bdd4ba97266c56860c2332add6f8 Mon Sep 17 00:00:00 2001
From: Varun Dhananjaya
Date: Wed, 2 Jun 2021 19:26:20 -0700
Subject: [PATCH 01/14] release v1.1.0

---
 CHANGELOG-0.x.md | 28 +++++++++++++++++++
 charts/aws-ebs-csi-driver/Chart.yaml | 2 +-
 charts/aws-ebs-csi-driver/values.yaml | 2 +-
 .../overlays/stable/kustomization.yaml | 2 +-
 docs/README.md | 3 ++
 5 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md
index efbb686385..8b209f3a20 100644
--- a/CHANGELOG-0.x.md
+++ b/CHANGELOG-0.x.md
@@ -1,3 +1,31 @@
+# v1.1.0
+
+## Notable changes
+- Helm chart cleaned up ([#856](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/856), [@krmichel](https://github.com/krmichel))
+
+### New features
+* Add podAnnotations to snapshotController StatefulSet ([#884](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/884), [@snstanton](https://github.com/snstanton))
+* Support custom pod labels in Helm chart ([#905](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/905), [@eytanhanig](https://github.com/eytanhanig))
+
+### Bug fixes
+* fix naming mistake in clusterrolebinding, expose env var to controller via downward api ([#874](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/874), [@vdhanan](https://github.com/vdhanan))
+* Fix kustomize RBAC bindings to have namespace kube-system ([#878](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/878), [@wongma7](https://github.com/wongma7))
+* rename node clusterrolebinding to make auto upgrade work ([#894](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/894), [@vdhanan](https://github.com/vdhanan))
+* remove hardcoded namespace for pod disruption budget ([#895](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/895), [@vdhanan](https://github.com/vdhanan))
+* Only initialize the in-cluster kube client when metadata service is actually unavailable ([#897](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/897), [@chrisayoub](https://github.com/chrisayoub))
+* Reduce default log level to 2 ([#903](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/903), [@wongma7](https://github.com/wongma7))
+* Add pod disruption budgets that got missed in a rebase ([#906](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/906), [@krmichel](https://github.com/krmichel))
+* remove WellKnownTopologyKey from PV Topology ([#912](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/912), [@Elbehery](https://github.com/Elbehery))
+* Skip volume expansion if block node ([#916](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/916), [@gnufied](https://github.com/gnufied))
+
+### Misc.
+* Add eksctl support to e2e scripts ([#852](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/852), [@wongma7](https://github.com/wongma7))
+* release v1.0.0 ([#865](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/865), [@vdhanan](https://github.com/vdhanan))
+* add self as owner ([#866](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/866), [@vdhanan](https://github.com/vdhanan))
+* bump helm chart version ([#881](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/881), [@vdhanan](https://github.com/vdhanan))
+* add custom useragent suffix ([#910](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/910), [@vdhanan](https://github.com/vdhanan))
+* Bump chart-releaser-action to v1.2.1 ([#914](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/914), [@gliptak](https://github.com/gliptak))
+
 # v1.0.0
 ## Notable changes
 - With this release, the EBS CSI Driver is now Generally Available!
diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml
index 5897f3ba77..8bebbc9943 100644
--- a/charts/aws-ebs-csi-driver/Chart.yaml
+++ b/charts/aws-ebs-csi-driver/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
-appVersion: "1.0.0"
+appVersion: "1.1.0"
 name: aws-ebs-csi-driver
 description: A Helm chart for AWS EBS CSI Driver
 version: 1.2.0
diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml
index 6ca9a9cff8..5a9e6e2138 100644
--- a/charts/aws-ebs-csi-driver/values.yaml
+++ b/charts/aws-ebs-csi-driver/values.yaml
@@ -4,7 +4,7 @@
 
 image:
   repository: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-  tag: "v1.0.0"
+  tag: "v1.1.0"
   pullPolicy: IfNotPresent
 
 sidecars:
diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml
index c42cdc388d..36f0134e75 100644
--- a/deploy/kubernetes/overlays/stable/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/kustomization.yaml
@@ -4,7 +4,7 @@ bases:
   - ../../base
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-    newTag: v1.0.0
+    newTag: v1.1.0
   - name: k8s.gcr.io/sig-storage/csi-provisioner
     newTag: v2.1.1
   - name: k8s.gcr.io/sig-storage/csi-attacher
diff --git a/docs/README.md b/docs/README.md
index 42dbcf4d0c..5ef60d1814 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -12,6 +12,7 @@ The [Amazon Elastic Block Store](https://aws.amazon.com/ebs/) Container Storage
 | AWS EBS CSI Driver \ CSI Version | v0.3.0| v1.0.0 | v1.1.0 |
 |----------------------------------------|-------|--------|--------|
 | master branch | no | no | yes |
+| v1.1.0 | no | no | yes |
 | v1.0.0 | no | no | yes |
 | v0.10.x | no | no | yes |
 | v0.9.x | no | no | yes |
@@ -77,6 +78,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll
 | AWS EBS CSI Driver \ Kubernetes Version| v1.12 | v1.13 | v1.14 | v1.15 | v1.16 | v1.17 | v1.18+ |
 |----------------------------------------|-------|-------|-------|-------|-------|-------|-------|
 | master branch | no | no+ | no | no | no | yes | yes |
+| v1.1.0 | no | no+ | no | no | no | yes | yes |
 | v1.0.0 | no | no+ | no | no | no | yes | yes |
 | v0.10.x | no | no+ | no | no | no | yes | yes |
 | v0.9.x | no | no+ | no | no | no | yes | yes |
@@ -95,6 +97,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll
 |AWS EBS CSI Driver Version | Image |
 |---------------------------|--------------------------------------------------|
 |master branch |amazon/aws-ebs-csi-driver:latest |
+|v1.1.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0 |
 |v1.0.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 |
 |v0.10.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.10.1|
 |v0.10.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.10.0|

From 3a082186ac3c5bc6cf3dd8e3d9420de7aa8955b7 Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Tue, 22 Jun 2021 11:36:06 -0700
Subject: [PATCH 02/14] Revert "Fix kustomize RBAC bindings to have namespace kube-system"

This reverts commit 3716e5ef3175bdbb6745018f9646957b5a635eac.
---
 Makefile | 16 ++++++++--------
 .../base/clusterrolebinding-attacher.yaml | 2 +-
 .../base/clusterrolebinding-csi-node.yaml | 2 +-
 .../base/clusterrolebinding-provisioner.yaml | 2 +-
 .../base/clusterrolebinding-resizer.yaml | 2 +-
 .../clusterrolebinding-snapshot-controller.yaml | 2 +-
 .../base/clusterrolebinding-snapshotter.yaml | 2 +-
 deploy/kubernetes/base/controller.yaml | 2 +-
 deploy/kubernetes/base/node.yaml | 2 +-
 ...nding-snapshot-controller-leaderelection.yaml | 2 +-
 10 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/Makefile b/Makefile
index 61a014d411..d5e14ee853 100644
--- a/Makefile
+++ b/Makefile
@@ -146,19 +146,19 @@ generate-kustomize: bin/helm
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-resizer.yaml > ../../deploy/kubernetes/base/clusterrole-resizer.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-snapshot-controller.yaml > ../../deploy/kubernetes/base/clusterrole-snapshot-controller.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-snapshotter.yaml > ../../deploy/kubernetes/base/clusterrole-snapshotter.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-attacher.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-attacher.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-csi-node.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-csi-node.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-provisioner.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-provisioner.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-resizer.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-resizer.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshot-controller.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshotter.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-attacher.yaml > ../../deploy/kubernetes/base/clusterrolebinding-attacher.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-csi-node.yaml > ../../deploy/kubernetes/base/clusterrolebinding-csi-node.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-provisioner.yaml > ../../deploy/kubernetes/base/clusterrolebinding-provisioner.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-resizer.yaml > ../../deploy/kubernetes/base/clusterrolebinding-resizer.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshot-controller.yaml > ../../deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshotter.yaml > ../../deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/controller.yaml > ../../deploy/kubernetes/base/controller.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/csidriver.yaml > ../../deploy/kubernetes/base/csidriver.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/node.yaml > ../../deploy/kubernetes/base/node.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/poddisruptionbudget-controller.yaml > ../../deploy/kubernetes/base/poddisruptionbudget-controller.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/poddisruptionbudget-snapshot-controller.yaml -f ../../deploy/kubernetes/values/snapshotter.yaml > ../../deploy/kubernetes/base/poddisruptionbudget-snapshot-controller.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/role-snapshot-controller-leaderelection.yaml -n kube-system > ../../deploy/kubernetes/base/role-snapshot-controller-leaderelection.yaml
-	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/rolebinding-snapshot-controller-leaderelection.yaml -n kube-system > ../../deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/role-snapshot-controller-leaderelection.yaml > ../../deploy/kubernetes/base/role-snapshot-controller-leaderelection.yaml
+	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/rolebinding-snapshot-controller-leaderelection.yaml > ../../deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-csi-controller.yaml > ../../deploy/kubernetes/base/serviceaccount-csi-controller.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-csi-node.yaml > ../../deploy/kubernetes/base/serviceaccount-csi-node.yaml
 	cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-snapshot-controller.yaml > ../../deploy/kubernetes/base/serviceaccount-snapshot-controller.yaml
diff --git a/deploy/kubernetes/base/clusterrolebinding-attacher.yaml b/deploy/kubernetes/base/clusterrolebinding-attacher.yaml
index 9a97b8efcb..5715d2651b 100644
--- a/deploy/kubernetes/base/clusterrolebinding-attacher.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-attacher.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-csi-controller-sa
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-external-attacher-role
diff --git a/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml b/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml
index d1292b132d..095db52510 100644
--- a/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-csi-node-sa
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-csi-node-role
diff --git a/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml b/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml
index 084bed9df9..3544bc61e2 100644
--- a/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-csi-controller-sa
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-external-provisioner-role
diff --git a/deploy/kubernetes/base/clusterrolebinding-resizer.yaml b/deploy/kubernetes/base/clusterrolebinding-resizer.yaml
index a840f51b83..c80a9a26bf 100644
--- a/deploy/kubernetes/base/clusterrolebinding-resizer.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-resizer.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-csi-controller-sa
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-external-resizer-role
diff --git a/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml b/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml
index 10c021c889..6d7cbec1b3 100644
--- a/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-snapshot-controller
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-snapshot-controller-role
diff --git a/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml b/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml
index 7720ca5d23..7946414d59 100644
--- a/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml
+++ b/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-csi-controller-sa
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: ClusterRole
   name: ebs-external-snapshotter-role
diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml
index d38e1fdc90..9b8327223e 100644
--- a/deploy/kubernetes/base/controller.yaml
+++ b/deploy/kubernetes/base/controller.yaml
@@ -31,7 +31,7 @@ spec:
           tolerationSeconds: 300
       containers:
         - name: ebs-plugin
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0
          imagePullPolicy: IfNotPresent
           args:
             # - {all,controller,node} # specify the driver mode
diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml
index e59bd43f38..038fc488e9 100644
--- a/deploy/kubernetes/base/node.yaml
+++ b/deploy/kubernetes/base/node.yaml
@@ -42,7 +42,7 @@ spec:
         - name: ebs-plugin
          securityContext:
             privileged: true
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0
           args:
             - node
             - --endpoint=$(CSI_ENDPOINT)
diff --git a/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml b/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml
index 9d66443b9a..fd9ab78b4b 100644
--- a/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml
+++ b/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml
@@ -9,7 +9,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: ebs-snapshot-controller
-    namespace: kube-system
+    namespace: default
 roleRef:
   kind: Role
   name: ebs-snapshot-controller-leaderelection

From 737b0678795d992ac4b86c0b2816e98bfe7bf8b6 Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Tue, 22 Jun 2021 11:40:00 -0700
Subject: [PATCH 03/14] Add fake test-e2e-external-eks make rule

---
 Makefile | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Makefile b/Makefile
index d5e14ee853..989c45dd6e 100644
--- a/Makefile
+++ b/Makefile
@@ -108,6 +108,10 @@ test-e2e-external:
 	GINKGO_SKIP="\[Disruptive\]|\[Serial\]" \
 	./hack/e2e/run.sh
 
+.PHONY: test-e2e-external-eks
+test-e2e-external-eks:
+	echo TODO
+
 .PHONY: image-release
 image-release:
 	docker build -t $(IMAGE):$(VERSION) . --target debian-base

From 8e3b578fae823df7959c2b9bc01dd55d235ff905 Mon Sep 17 00:00:00 2001
From: Xiang Li
Date: Tue, 8 Jun 2021 17:43:29 -0700
Subject: [PATCH 04/14] update inFlight cache to avoid race condition on volume operation

---
 pkg/driver/controller.go | 180 ++++++++++++++++++++++----
 pkg/driver/controller_test.go | 111 +++++++++++++++++---
 pkg/driver/internal/inflight.go | 4 +
 3 files changed, 227 insertions(+), 68 deletions(-)

diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index 03a550deff..21f37eb97f 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -99,27 +99,21 @@ func newControllerService(driverOptions *DriverOptions) controllerService {
 
 func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
 	klog.V(4).Infof("CreateVolume: called with args %+v", *req)
-	volName := req.GetName()
-	if len(volName) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Volume name not provided")
+	if err := validateCreateVolumeRequest(req); err != nil {
+		return nil, err
 	}
-
 	volSizeBytes, err := getVolSizeBytes(req)
 	if err != nil {
 		return nil, err
 	}
+	volName := req.GetName()
 
-	volCaps := req.GetVolumeCapabilities()
-	if len(volCaps) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided")
-	}
-
-	if !isValidVolumeCapabilities(volCaps) {
-		modes := util.GetAccessModes(volCaps)
-		stringModes := strings.Join(*modes, ", ")
-		errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported."
-		return nil, status.Error(codes.InvalidArgument, errString)
+	// check if a request is already in-flight
+	if ok := d.inFlight.Insert(volName); !ok {
+		msg := fmt.Sprintf("Create volume request for %s is already in progress", volName)
+		return nil, status.Error(codes.Aborted, msg)
 	}
+	defer d.inFlight.Delete(volName)
 
 	disk, err := d.cloud.GetDiskByName(ctx, volName, volSizeBytes)
 	if err != nil {
@@ -217,13 +211,6 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
 		return newCreateVolumeResponse(disk), nil
 	}
 
-	// check if a request is already in-flight because the CreateVolume API is not idempotent
-	if ok := d.inFlight.Insert(req.String()); !ok {
-		msg := fmt.Sprintf("Create volume request for %s is already in progress", volName)
-		return nil, status.Error(codes.Aborted, msg)
-	}
-	defer d.inFlight.Delete(req.String())
-
 	// create a new volume
 	zone := pickAvailabilityZone(req.GetAccessibilityRequirements())
 	outpostArn := getOutpostArn(req.GetAccessibilityRequirements())
@@ -264,12 +251,40 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
 	return newCreateVolumeResponse(disk), nil
 }
 
+func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
+	volName := req.GetName()
+	if len(volName) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume name not provided")
+	}
+
+	volCaps := req.GetVolumeCapabilities()
+	if len(volCaps) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume capabilities not provided")
+	}
+
+	if !isValidVolumeCapabilities(volCaps) {
+		modes := util.GetAccessModes(volCaps)
+		stringModes := strings.Join(*modes, ", ")
+		errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported."
+		return status.Error(codes.InvalidArgument, errString)
+	}
+	return nil
+}
+
 func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
 	klog.V(4).Infof("DeleteVolume: called with args: %+v", *req)
+	if err := validateDeleteVolumeRequest(req); err != nil {
+		return nil, err
+	}
+
 	volumeID := req.GetVolumeId()
-	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
+
+	// check if a request is already in-flight
+	if ok := d.inFlight.Insert(volumeID); !ok {
+		msg := fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, volumeID)
+		return nil, status.Error(codes.Aborted, msg)
 	}
+	defer d.inFlight.Delete(volumeID)
 
 	if _, err := d.cloud.DeleteDisk(ctx, volumeID); err != nil {
 		if err == cloud.ErrNotFound {
@@ -282,30 +297,21 @@ func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 	return &csi.DeleteVolumeResponse{}, nil
 }
 
+func validateDeleteVolumeRequest(req *csi.DeleteVolumeRequest) error {
+	if len(req.GetVolumeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume ID not provided")
+	}
+	return nil
+}
+
 func (d *controllerService) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
 	klog.V(4).Infof("ControllerPublishVolume: called with args %+v", *req)
-	volumeID := req.GetVolumeId()
-	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
+	if err := validateControllerPublishVolumeRequest(req); err != nil {
+		return nil, err
 	}
 
+	volumeID := req.GetVolumeId()
 	nodeID := req.GetNodeId()
-	if len(nodeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Node ID not provided")
-	}
-
-	volCap := req.GetVolumeCapability()
-	if volCap == nil {
-		return nil, status.Error(codes.InvalidArgument, "Volume capability not provided")
-	}
-
-	caps := []*csi.VolumeCapability{volCap}
-	if !isValidVolumeCapabilities(caps) {
-		modes := util.GetAccessModes(caps)
-		stringModes := strings.Join(*modes, ", ")
-		errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported."
-		return nil, status.Error(codes.InvalidArgument, errString)
-	}
 
 	if !d.cloud.IsExistInstance(ctx, nodeID) {
 		return nil, status.Errorf(codes.NotFound, "Instance %q not found", nodeID)
@@ -333,17 +339,38 @@ func (d *controllerService) ControllerPublishVolume(ctx context.Context, req *cs
 	return &csi.ControllerPublishVolumeResponse{PublishContext: pvInfo}, nil
 }
 
+func validateControllerPublishVolumeRequest(req *csi.ControllerPublishVolumeRequest) error {
+	if len(req.GetVolumeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume ID not provided")
+	}
+
+	if len(req.GetNodeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Node ID not provided")
+	}
+
+	volCap := req.GetVolumeCapability()
+	if volCap == nil {
+		return status.Error(codes.InvalidArgument, "Volume capability not provided")
+	}
+
+	caps := []*csi.VolumeCapability{volCap}
+	if !isValidVolumeCapabilities(caps) {
+		modes := util.GetAccessModes(caps)
+		stringModes := strings.Join(*modes, ", ")
+		errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported."
+		return status.Error(codes.InvalidArgument, errString)
+	}
+	return nil
+}
+
 func (d *controllerService) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
 	klog.V(4).Infof("ControllerUnpublishVolume: called with args %+v", *req)
-	volumeID := req.GetVolumeId()
-	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
+	if err := validateControllerUnpublishVolumeRequest(req); err != nil {
+		return nil, err
 	}
 
+	volumeID := req.GetVolumeId()
 	nodeID := req.GetNodeId()
-	if len(nodeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Node ID not provided")
-	}
 
 	if err := d.cloud.DetachDisk(ctx, volumeID, nodeID); err != nil {
 		if err == cloud.ErrNotFound {
@@ -356,6 +383,18 @@ func (d *controllerService) ControllerUnpublishVolume(ctx context.Context, req *
 	return &csi.ControllerUnpublishVolumeResponse{}, nil
 }
 
+func validateControllerUnpublishVolumeRequest(req *csi.ControllerUnpublishVolumeRequest) error {
+	if len(req.GetVolumeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume ID not provided")
+	}
+
+	if len(req.GetNodeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Node ID not provided")
+	}
+
+	return nil
+}
+
 func (d *controllerService) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
 	klog.V(4).Infof("ControllerGetCapabilities: called with args %+v", *req)
 	var caps []*csi.ControllerServiceCapability
@@ -489,15 +528,20 @@ func isValidVolumeContext(volContext map[string]string) bool {
 
 func (d *controllerService) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 	klog.V(4).Infof("CreateSnapshot: called with args %+v", req)
-	snapshotName := req.GetName()
-	if len(snapshotName) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Snapshot name not provided")
+	if err := validateCreateSnapshotRequest(req); err != nil {
+		return nil, err
 	}
 
+	snapshotName := req.GetName()
 	volumeID := req.GetSourceVolumeId()
-	if len(volumeID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Snapshot volume source ID not provided")
+
+	// check if a request is already in-flight
+	if ok := d.inFlight.Insert(snapshotName); !ok {
+		msg := fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, snapshotName)
+		return nil, status.Error(codes.Aborted, msg)
 	}
+	defer d.inFlight.Delete(snapshotName)
+
 	snapshot, err := d.cloud.GetSnapshotByName(ctx, snapshotName)
 	if err != nil && err != cloud.ErrNotFound {
 		klog.Errorf("Error looking for the snapshot %s: %v", snapshotName, err)
@@ -535,12 +579,31 @@ func (d *controllerService) CreateSnapshot(ctx context.Context, req *csi.CreateS
 	return newCreateSnapshotResponse(snapshot)
 }
 
+func validateCreateSnapshotRequest(req *csi.CreateSnapshotRequest) error {
+	if len(req.GetName()) == 0 {
+		return status.Error(codes.InvalidArgument, "Snapshot name not provided")
+	}
+
+	if len(req.GetSourceVolumeId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Snapshot volume source ID not provided")
+	}
+	return nil
+}
+
 func (d *controllerService) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
 	klog.V(4).Infof("DeleteSnapshot: called with args %+v", req)
+	if err := validateDeleteSnapshotRequest(req); err != nil {
+		return nil, err
+	}
+
 	snapshotID := req.GetSnapshotId()
-	if len(snapshotID) == 0 {
-		return nil, status.Error(codes.InvalidArgument, "Snapshot ID not provided")
+
+	// check if a request is already in-flight
+	if ok := d.inFlight.Insert(snapshotID); !ok {
+		msg := fmt.Sprintf("DeleteSnapshot for Snapshot %s is already in progress", snapshotID)
+		return nil, status.Error(codes.Aborted, msg)
 	}
+	defer d.inFlight.Delete(snapshotID)
 
 	if _, err := d.cloud.DeleteSnapshot(ctx, snapshotID); err != nil {
 		if err == cloud.ErrNotFound {
@@ -553,6 +616,13 @@ func (d *controllerService) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
 	return &csi.DeleteSnapshotResponse{}, nil
 }
 
+func validateDeleteSnapshotRequest(req *csi.DeleteSnapshotRequest) error {
+	if len(req.GetSnapshotId()) == 0 {
+		return status.Error(codes.InvalidArgument, "Snapshot ID not provided")
+	}
+	return nil
+}
+
 func (d *controllerService) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
 	klog.V(4).Infof("ListSnapshots: called with args %+v", req)
 	var snapshots []*cloud.Snapshot
diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go
index 84d78db1b0..ca2f96115a 100644
--- a/pkg/driver/controller_test.go
+++ b/pkg/driver/controller_test.go
@@ -1536,11 +1536,10 @@ func TestCreateVolume(t *testing.T) {
 				defer mockCtl.Finish()
 
 				mockCloud := mocks.NewMockCloud(mockCtl)
-				mockCloud.EXPECT().GetDiskByName(gomock.Eq(ctx), gomock.Eq(req.Name), gomock.Eq(stdVolSize)).Return(nil, cloud.ErrNotFound)
 
 				inFlight := internal.NewInFlight()
-				inFlight.Insert(req.String())
-				defer inFlight.Delete(req.String())
+				inFlight.Insert(req.GetName())
+				defer inFlight.Delete(req.GetName())
 
 				awsDriver := controllerService{
 					cloud:         mockCloud,
 					inFlight:      inFlight,
 					driverOptions: &DriverOptions{},
 				}
 				_, err := awsDriver.CreateVolume(ctx, req)
-				if err == nil {
-					t.Fatalf("Expected CreateVolume to fail but got no error")
-				}
 
-				srvErr, ok := status.FromError(err)
-				if !ok {
-					t.Fatalf("Could not get error status code from error: %v", srvErr)
-				}
-				if srvErr.Code() != codes.Aborted {
-					t.Fatalf("Expected Aborted but got: %s", srvErr.Code())
-				}
+				checkAbortedErrorCode(t, err)
 			},
 		},
 		{
@@ -1714,6 +1704,31 @@ func TestDeleteVolume(t *testing.T) {
 				}
 			},
 		},
+		{
+			name: "fail another request already in-flight",
+			testFunc: func(t *testing.T) {
+				req := &csi.DeleteVolumeRequest{
+					VolumeId: "vol-test",
+				}
+
+				ctx := context.Background()
+				mockCtl := gomock.NewController(t)
+				defer mockCtl.Finish()
+
+				mockCloud := mocks.NewMockCloud(mockCtl)
+				inFlight := internal.NewInFlight()
+				inFlight.Insert(req.GetVolumeId())
+				defer inFlight.Delete(req.GetVolumeId())
+				awsDriver := controllerService{
+					cloud:         mockCloud,
+					inFlight:      inFlight,
+					driverOptions: &DriverOptions{},
+				}
+				_, err := awsDriver.DeleteVolume(ctx, req)
+
+				checkAbortedErrorCode(t, err)
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -2259,6 +2274,34 @@ func TestCreateSnapshot(t *testing.T) {
 				}
 			},
 		},
+		{
+			name: "fail with another request in-flight",
+			testFunc: func(t *testing.T) {
+				req := &csi.CreateSnapshotRequest{
+					Name:           "test-snapshot",
+					Parameters:     nil,
+					SourceVolumeId: "vol-test",
+				}
+
+				mockCtl := gomock.NewController(t)
+				defer mockCtl.Finish()
+
+				mockCloud := mocks.NewMockCloud(mockCtl)
+
+				inFlight := internal.NewInFlight()
+				inFlight.Insert(req.GetName())
+				defer inFlight.Delete(req.GetName())
+
+				awsDriver := controllerService{
+					cloud:         mockCloud,
+					inFlight:      inFlight,
+					driverOptions: &DriverOptions{},
+				}
+				_, err := awsDriver.CreateSnapshot(context.Background(), req)
+
+				checkAbortedErrorCode(t, err)
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -2321,6 +2364,34 @@ func TestDeleteSnapshot(t *testing.T) {
 				}
 			},
 		},
+		{
+			name: "fail with another request in-flight",
+			testFunc: func(t *testing.T) {
+				ctx := context.Background()
+
+				mockCtl := gomock.NewController(t)
+				defer mockCtl.Finish()
+
+				mockCloud := mocks.NewMockCloud(mockCtl)
+
+				req := &csi.DeleteSnapshotRequest{
+					SnapshotId: "test-snapshotID",
+				}
+				inFlight := internal.NewInFlight()
+				inFlight.Insert(req.GetSnapshotId())
+				defer inFlight.Delete(req.GetSnapshotId())
+
+				awsDriver := controllerService{
+					cloud:         mockCloud,
+					inFlight:      inFlight,
+					driverOptions: &DriverOptions{},
+				}
+
+				_, err := awsDriver.DeleteSnapshot(ctx, req)
+
+				checkAbortedErrorCode(t, err)
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -3082,3 +3153,17 @@ func TestControllerExpandVolume(t *testing.T) {
 		})
 	}
 }
+
+func checkAbortedErrorCode(t *testing.T, err error) {
+	if err == nil {
+		t.Fatalf("Expected operation to fail but got no error")
+	}
+
+	srvErr, ok := status.FromError(err)
+	if !ok {
+		t.Fatalf("Could not get error status code from error: %v", srvErr)
+	}
+	if srvErr.Code() != codes.Aborted {
+		t.Fatalf("Expected Aborted but got: %s", srvErr.Code())
+	}
+}
diff --git a/pkg/driver/internal/inflight.go b/pkg/driver/internal/inflight.go
index 5f0d2a9ad7..9b45680fbc 100644
--- a/pkg/driver/internal/inflight.go
+++ b/pkg/driver/internal/inflight.go
@@ -30,6 +30,10 @@ type Idempotent interface {
 	String() string
 }
 
+const (
+	VolumeOperationAlreadyExistsErrorMsg = "An operation with the given Volume %s already exists"
+)
+
 // InFlight is a struct used to manage in flight requests per volumeId.
 type InFlight struct {
 	mux *sync.Mutex

From 2c6373b26883ae423a18cdaa24968749428bf94b Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Thu, 1 Jul 2021 12:34:55 -0700
Subject: [PATCH 05/14] Release v1.1.1 and chart v1.2.4

---
 CHANGELOG-0.x.md | 5 +++++
 Makefile | 2 +-
 charts/aws-ebs-csi-driver/CHANGELOG.md | 6 ++++++
 charts/aws-ebs-csi-driver/Chart.yaml | 4 ++--
 charts/aws-ebs-csi-driver/values.yaml | 3 +--
 deploy/kubernetes/base/controller.yaml | 2 +-
 deploy/kubernetes/base/node.yaml | 2 +-
 deploy/kubernetes/overlays/stable/ecr/kustomization.yaml | 2 +-
 deploy/kubernetes/overlays/stable/kustomization.yaml | 2 +-
 docs/README.md | 4 ++--
 10 files changed, 21 insertions(+), 11 deletions(-)
 create mode 100644 charts/aws-ebs-csi-driver/CHANGELOG.md

diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md
index 8b209f3a20..a5fd0400ac 100644
--- a/CHANGELOG-0.x.md
+++ b/CHANGELOG-0.x.md
@@ -1,3 +1,8 @@
+# v1.1.1
+
+### Bug fixes
+- update inFlight cache to avoid race condition on volume operation ([#924](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/924), [@AndyXiangLi](https://github.com/AndyXiangLi))
+
 # v1.1.0
 
 ## Notable changes
diff --git a/Makefile b/Makefile
index 989c45dd6e..dfd3d7bcfa 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@
 PKG=github.com/kubernetes-sigs/aws-ebs-csi-driver
 IMAGE?=amazon/aws-ebs-csi-driver
-VERSION=v1.0.0
+VERSION=v1.1.1
 VERSION_AMAZONLINUX=$(VERSION)-amazonlinux
 GIT_COMMIT?=$(shell git rev-parse HEAD)
 BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
diff --git a/charts/aws-ebs-csi-driver/CHANGELOG.md b/charts/aws-ebs-csi-driver/CHANGELOG.md
new file mode 100644
index 0000000000..b82cf8631f
--- /dev/null
+++ b/charts/aws-ebs-csi-driver/CHANGELOG.md
@@ -0,0 +1,6 @@
+# Helm chart
+
+# v1.2.4
+* Bump app/driver version to `v1.1.1`
+* Install VolumeSnapshotClass, VolumeSnapshotContent, VolumeSnapshot CRDs if enableVolumeSnapshot is true
+* Only run csi-snapshotter sidecar if enableVolumeSnapshot is true or if CRDs are already installed
diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml
index 8bebbc9943..fdba841218 100644
--- a/charts/aws-ebs-csi-driver/Chart.yaml
+++ b/charts/aws-ebs-csi-driver/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v1
-appVersion: "1.1.0"
+appVersion: "1.1.1"
 name: aws-ebs-csi-driver
 description: A Helm chart for AWS EBS CSI Driver
-version: 1.2.0
+version: 1.2.4
 kubeVersion: ">=1.17.0-0"
 home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver
 sources:
diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml
index 5a9e6e2138..96ca4006cc 100644
--- a/charts/aws-ebs-csi-driver/values.yaml
+++ b/charts/aws-ebs-csi-driver/values.yaml
@@ -4,7 +4,7 @@
 
 image:
   repository: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-  tag: "v1.1.0"
+  tag: "v1.1.1"
   pullPolicy: IfNotPresent
 
 sidecars:
@@ -124,7 +124,6 @@ controller:
   #       whenUnsatisfiable: ScheduleAnyway
   topologySpreadConstraints: []
-
 # Moving to values under node
 # The "maximum number of attachable volumes" per node
 volumeAttachLimit:
diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml
index 9b8327223e..bb693effcd 100644
--- a/deploy/kubernetes/base/controller.yaml
+++ b/deploy/kubernetes/base/controller.yaml
@@ -31,7 +31,7 @@ spec:
           tolerationSeconds: 300
       containers:
         - name: ebs-plugin
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1
          imagePullPolicy: IfNotPresent
           args:
             # - {all,controller,node} # specify the driver mode
diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml
index 038fc488e9..907f7e3d97 100644
--- a/deploy/kubernetes/base/node.yaml
+++ b/deploy/kubernetes/base/node.yaml
@@ -42,7 +42,7 @@ spec:
         - name: ebs-plugin
          securityContext:
             privileged: true
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1
           args:
             - node
             - --endpoint=$(CSI_ENDPOINT)
diff --git a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
index d36d7ca71e..e5f2a1ddf4 100644
--- a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
@@ -5,7 +5,7 @@ bases:
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
     newName: 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-ebs-csi-driver
-    newTag: v1.0.0
+    newTag: v1.1.1
   - name: k8s.gcr.io/sig-storage/csi-provisioner
     newName: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
     newTag: v2.1.1-eks-1-18-3
diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml
index 36f0134e75..50ced3ef5d 100644
--- a/deploy/kubernetes/overlays/stable/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/kustomization.yaml
@@ -4,7 +4,7 @@ bases:
   - ../../base
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-    newTag: v1.1.0
+    newTag: v1.1.1
   - name: k8s.gcr.io/sig-storage/csi-provisioner
     newTag: v2.1.1
   - name: k8s.gcr.io/sig-storage/csi-attacher
diff --git a/docs/README.md b/docs/README.md
index 5ef60d1814..7b1638b992 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -12,7 +12,7 @@ The [Amazon Elastic Block Store](https://aws.amazon.com/ebs/) Container Storage
 | AWS EBS CSI Driver \ CSI Version | v0.3.0| v1.0.0 | v1.1.0 |
 |----------------------------------------|-------|--------|--------|
 | master branch | no | no | yes |
-| v1.1.0 | no | no | yes |
+| v1.1.x | no | no | yes |
 | v1.0.0 | no | no | yes |
 | v0.10.x | no | no | yes |
 | v0.9.x | no | no | yes |
@@ -96,7 +96,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll
 ## Container Images:
 |AWS EBS CSI Driver Version | Image |
 |---------------------------|--------------------------------------------------|
-|master branch |amazon/aws-ebs-csi-driver:latest |
+|v1.1.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1 |
 |v1.1.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0 |
 |v1.0.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 |
 |v0.10.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.10.1|

From 75113249527aed10de64114620f9f856edb84ff3 Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Thu, 22 Jul 2021 10:50:30 -0700
Subject: [PATCH 06/14] Update base images: yum update al2, bump debian tag

---
 Dockerfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index aa6464e644..d974968d1d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -18,12 +18,13 @@
 COPY . .
 RUN make

 FROM amazonlinux:2 AS amazonlinux
+RUN yum update -y
 RUN yum install ca-certificates e2fsprogs xfsprogs util-linux -y
 COPY --from=builder /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver/bin/aws-ebs-csi-driver /bin/aws-ebs-csi-driver

 ENTRYPOINT ["/bin/aws-ebs-csi-driver"]

-FROM k8s.gcr.io/build-image/debian-base:v2.1.3 AS debian-base
+FROM k8s.gcr.io/build-image/debian-base:buster-v1.8.0 AS debian-base
 RUN clean-install ca-certificates e2fsprogs mount udev util-linux xfsprogs

 COPY --from=builder /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver/bin/aws-ebs-csi-driver /bin/aws-ebs-csi-driver

From 061fcd8d9ba1dc75c2cb8c0fc3b72794cb3972c5 Mon Sep 17 00:00:00 2001
From: Varun Dhananjaya
Date: Fri, 23 Jul 2021 13:40:45 -0700
Subject: [PATCH 07/14] release 1.1.2

---
 CHANGELOG-0.x.md | 5 +++++
 Makefile | 2 +-
 charts/aws-ebs-csi-driver/Chart.yaml | 2 +-
 charts/aws-ebs-csi-driver/values.yaml | 2 +-
 deploy/kubernetes/base/controller.yaml | 2 +-
 deploy/kubernetes/base/node.yaml | 2 +-
 deploy/kubernetes/overlays/stable/ecr/kustomization.yaml | 2 +-
 deploy/kubernetes/overlays/stable/kustomization.yaml | 2 +-
 docs/README.md | 1 +
 9 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md
index a5fd0400ac..3ff3202a1e 100644
--- a/CHANGELOG-0.x.md
+++ b/CHANGELOG-0.x.md
@@ -1,3 +1,8 @@
+# v1.1.2
+
+## Notable changes
+- Update base images: yum update al2, bump debian tag ([#986](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/986), [@wongma7](https://github.com/wongma7))
+
 # v1.1.1
 
 ### Bug fixes
diff --git a/Makefile b/Makefile
index dfd3d7bcfa..f2316bfac2 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@
 PKG=github.com/kubernetes-sigs/aws-ebs-csi-driver
 IMAGE?=amazon/aws-ebs-csi-driver
-VERSION=v1.1.1
+VERSION=v1.1.2
 VERSION_AMAZONLINUX=$(VERSION)-amazonlinux
 GIT_COMMIT?=$(shell git rev-parse HEAD)
 BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml
index fdba841218..f59b6e10eb 100644
--- a/charts/aws-ebs-csi-driver/Chart.yaml
+++ b/charts/aws-ebs-csi-driver/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
-appVersion: "1.1.1"
+appVersion: "1.1.2"
 name: aws-ebs-csi-driver
 description: A Helm chart for AWS EBS CSI Driver
 version: 1.2.4
diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml
index 96ca4006cc..9dbabdeadf 100644
--- a/charts/aws-ebs-csi-driver/values.yaml
+++ b/charts/aws-ebs-csi-driver/values.yaml
@@ -4,7 +4,7 @@
 
 image:
   repository: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-  tag: "v1.1.1"
+  tag: "v1.1.2"
   pullPolicy: IfNotPresent
 
 sidecars:
diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml
index bb693effcd..a33841a827 100644
--- a/deploy/kubernetes/base/controller.yaml
+++ b/deploy/kubernetes/base/controller.yaml
@@ -31,7 +31,7 @@ spec:
           tolerationSeconds: 300
       containers:
         - name: ebs-plugin
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2
          imagePullPolicy: IfNotPresent
           args:
             # - {all,controller,node} # specify the driver mode
diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml
index 907f7e3d97..36c61d7099 100644
--- a/deploy/kubernetes/base/node.yaml
+++ b/deploy/kubernetes/base/node.yaml
@@ -42,7 +42,7 @@ spec:
         - name: ebs-plugin
          securityContext:
             privileged: true
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2
           args:
             - node
             - --endpoint=$(CSI_ENDPOINT)
diff --git a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
index e5f2a1ddf4..62da2b8843 100644
--- a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
@@ -5,7 +5,7 @@ bases:
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
     newName: 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-ebs-csi-driver
-    newTag: v1.1.1
+    newTag: v1.1.2
  - name: k8s.gcr.io/sig-storage/csi-provisioner
     newName: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
     newTag: v2.1.1-eks-1-18-3
diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml
index 50ced3ef5d..21106dd1de 100644
--- a/deploy/kubernetes/overlays/stable/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/kustomization.yaml
@@ -4,7 +4,7 @@ bases:
   - ../../base
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-    newTag: v1.1.1
+    newTag: v1.1.2
   - name: k8s.gcr.io/sig-storage/csi-provisioner
     newTag: v2.1.1
   - name: k8s.gcr.io/sig-storage/csi-attacher
diff --git a/docs/README.md b/docs/README.md
index 7b1638b992..78296560d9 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -96,6 +96,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll
 ## Container Images:
 |AWS EBS CSI Driver Version | Image |
 |---------------------------|--------------------------------------------------|
+|v1.1.2 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2 |
 |v1.1.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1 |
 |v1.1.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0 |
 |v1.0.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 |

From b104754f04091e8d4b54262a2eb5081c434ecebb Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Fri, 23 Jul 2021 17:15:07 -0700
Subject: [PATCH 08/14] Release v1.1.3

---
 CHANGELOG-0.x.md | 7 +++++++
 Dockerfile | 1 +
 Makefile | 2 +-
 charts/aws-ebs-csi-driver/Chart.yaml | 2 +-
 charts/aws-ebs-csi-driver/values.yaml | 2 +-
 deploy/kubernetes/base/controller.yaml | 2 +-
 deploy/kubernetes/base/node.yaml | 2 +-
 deploy/kubernetes/overlays/stable/ecr/kustomization.yaml | 2 +-
 deploy/kubernetes/overlays/stable/kustomization.yaml | 2 +-
 docs/README.md | 1 +
 10 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md
index 3ff3202a1e..4260e93f32 100644
--- a/CHANGELOG-0.x.md
+++ b/CHANGELOG-0.x.md
@@ -1,3 +1,10 @@
+# v1.1.3
+
+## Notable changes
+- Fix ecr image being debian-based
+- In a future release, the debian-based image will be removed and only an al2-based image will be maintained and pushed to GCR and ECR
+- In a future release, images will stop getting pushed to Docker Hub
+
 # v1.1.2
 
 ## Notable changes
diff --git a/Dockerfile b/Dockerfile
index d974968d1d..e077e3e6ae 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -20,6 +20,7 @@
 FROM amazonlinux:2 AS amazonlinux
 RUN yum update -y
 RUN yum install ca-certificates e2fsprogs xfsprogs util-linux -y
+RUN yum clean all
 COPY --from=builder /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver/bin/aws-ebs-csi-driver /bin/aws-ebs-csi-driver

 ENTRYPOINT ["/bin/aws-ebs-csi-driver"]
diff --git a/Makefile b/Makefile
index f2316bfac2..a259f8673d 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@
 PKG=github.com/kubernetes-sigs/aws-ebs-csi-driver
 IMAGE?=amazon/aws-ebs-csi-driver
-VERSION=v1.1.2
+VERSION=v1.1.3
 VERSION_AMAZONLINUX=$(VERSION)-amazonlinux
 GIT_COMMIT?=$(shell git rev-parse HEAD)
 BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml
index f59b6e10eb..d874536c0b 100644
--- a/charts/aws-ebs-csi-driver/Chart.yaml
+++ b/charts/aws-ebs-csi-driver/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
-appVersion: "1.1.2"
+appVersion: "1.1.3"
 name: aws-ebs-csi-driver
 description: A Helm chart for AWS EBS CSI Driver
 version: 1.2.4
diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml
index 9dbabdeadf..2a6644cf87 100644
--- a/charts/aws-ebs-csi-driver/values.yaml
+++ b/charts/aws-ebs-csi-driver/values.yaml
@@ -4,7 +4,7 @@
 
 image:
   repository: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-  tag: "v1.1.2"
+  tag: "v1.1.3"
   pullPolicy: IfNotPresent
 
 sidecars:
diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml
index a33841a827..972a9232bf 100644
--- a/deploy/kubernetes/base/controller.yaml
+++ b/deploy/kubernetes/base/controller.yaml
@@ -31,7 +31,7 @@ spec:
           tolerationSeconds: 300
       containers:
         - name: ebs-plugin
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3
          imagePullPolicy: IfNotPresent
           args:
             # - {all,controller,node} # specify the driver mode
diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml
index 36c61d7099..2d129d5d7d 100644
--- a/deploy/kubernetes/base/node.yaml
+++ b/deploy/kubernetes/base/node.yaml
@@ -42,7 +42,7 @@ spec:
         - name: ebs-plugin
          securityContext:
             privileged: true
-          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2
+          image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3
           args:
             - node
             - --endpoint=$(CSI_ENDPOINT)
diff --git a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
index 62da2b8843..356973c067 100644
--- a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml
@@ -5,7 +5,7 @@ bases:
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
     newName: 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-ebs-csi-driver
-    newTag: v1.1.2
+    newTag: v1.1.3
  - name: k8s.gcr.io/sig-storage/csi-provisioner
     newName: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
     newTag: v2.1.1-eks-1-18-3
diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml
index 21106dd1de..a5002a8bc4 100644
--- a/deploy/kubernetes/overlays/stable/kustomization.yaml
+++ b/deploy/kubernetes/overlays/stable/kustomization.yaml
@@ -4,7 +4,7 @@ bases:
   - ../../base
 images:
   - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver
-    newTag: v1.1.2
+    newTag: v1.1.3
   - name: k8s.gcr.io/sig-storage/csi-provisioner
     newTag: v2.1.1
   - name: k8s.gcr.io/sig-storage/csi-attacher
diff --git a/docs/README.md b/docs/README.md
index 78296560d9..19b3119bfa 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -96,6 +96,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll
 ## Container Images:
 |AWS EBS CSI Driver Version | Image |
 |---------------------------|--------------------------------------------------|
+|v1.1.3 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3 |
 |v1.1.2 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2 |
 |v1.1.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1 |
 |v1.1.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0 |

From d0112611cc4170d0fe15a2116302f5b446416b1f Mon Sep 17 00:00:00 2001
From: Nirmal Aagash Chandramohan
Date: Tue, 10 Aug 2021 22:15:55 +0000
Subject: [PATCH 09/14] Added Mount Idempotency

---
 pkg/driver/node.go | 35 ++++++++++++++++++++++++++++++---
 pkg/driver/node_test.go | 8 ++++++++
 2 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/pkg/driver/node.go b/pkg/driver/node.go
index bb8e728189..798911e740 100644
--- a/pkg/driver/node.go
+++ b/pkg/driver/node.go
@@ -624,10 +624,39 @@ func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR
 	}
 
 	klog.V(4).Infof("NodePublishVolume: mounting %s at %s with option %s as fstype %s", source, target, mountOptions, fsType)
-	if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil {
-		if removeErr := os.Remove(target); removeErr != nil {
-			return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, err)
+	/*
+		Checking if it's a mount point. There are three cases,
+		1. true, err when the directory does not exist or corrupted.
+		2. false, nil when the path is already mounted with a device.
+		3. true, nil when the path is not mounted with any device.
+	*/
+	notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
+	if err != nil && !os.IsNotExist(err){
+		//Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount.
+		_, pathErr := mountutils.PathExists(target)
+		if pathErr != nil && mountutils.IsCorruptedMnt(pathErr) {
+			klog.V(4).Infof("Target path %q is a corrupted directory",target)
+		}else{
+			return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
 		}
+	}
+
+	if !notMnt {
+		//Return error when any of the directory inside is not readable which means that a device could be mounted.
+		_, err = os.ReadDir(target)
+		if err != nil {
+			klog.V(4).Infof("Error occurred at reading directory %q. Trying to Unmount.",target)
+			//Reading the directory failed and trying to unmount and remount
+			if mntErr := d.mounter.Unmount(target); mntErr != nil {
+				return status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, err)
+			}
+		} else {
+			klog.V(4).Infof("Target path %q is already mounted",target)
+			return nil
+		}
+	}
+
+	if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil {
 		return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
 	}
 
diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go
index 20bf8c48dc..01eb6d7fcb 100644
--- a/pkg/driver/node_test.go
+++ b/pkg/driver/node_test.go
@@ -597,6 +597,8 @@ func TestNodePublishVolume(t *testing.T) {
 				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
 				mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind"})).Return(nil)
+				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
+				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
 
 				req := &csi.NodePublishVolumeRequest{
 					PublishContext:    map[string]string{DevicePathKey: devicePath},
@@ -629,6 +631,8 @@ func TestNodePublishVolume(t *testing.T) {
 				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
 				mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(FSTypeXfs), gomock.Eq([]string{"bind"})).Return(nil)
+				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
+				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
 
 				req := &csi.NodePublishVolumeRequest{
 					PublishContext:    map[string]string{DevicePathKey: devicePath},
@@ -670,6 +674,8 @@ func TestNodePublishVolume(t *testing.T) {
 				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
 				mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "ro"})).Return(nil)
+				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
+				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
 
 				req := &csi.NodePublishVolumeRequest{
 					PublishContext:    map[string]string{DevicePathKey: devicePath},
@@ -703,6 +709,8 @@ func TestNodePublishVolume(t *testing.T) {
 				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
 				mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "test-flag"})).Return(nil)
+				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
+				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
 
 				req := &csi.NodePublishVolumeRequest{
 					PublishContext:    map[string]string{DevicePathKey: "/dev/fake"},

From 482d3bdb73f3927dc7ce86e127e965141c644e0e Mon Sep 17 00:00:00 2001
From: Nirmal Aagash Chandramohan
Date: Wed, 11 Aug 2021 18:54:19 +0000
Subject: [PATCH 10/14] fixed:g-format error

---
 pkg/driver/node.go | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/pkg/driver/node.go b/pkg/driver/node.go
index 798911e740..680bccae2e 100644
--- a/pkg/driver/node.go
+++ b/pkg/driver/node.go
@@ -631,12 +631,12 @@ func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR
 		3. true, nil when the path is not mounted with any device.
 	*/
 	notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
-	if err != nil && !os.IsNotExist(err){
+	if err != nil && !os.IsNotExist(err) {
 		//Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount.
 		_, pathErr := mountutils.PathExists(target)
 		if pathErr != nil && mountutils.IsCorruptedMnt(pathErr) {
-			klog.V(4).Infof("Target path %q is a corrupted directory",target)
-		}else{
+			klog.V(4).Infof("Target path %q is a corrupted directory", target)
+		} else {
 			return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
 		}
 	}
@@ -645,13 +645,13 @@ func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR
 		//Return error when any of the directory inside is not readable which means that a device could be mounted.
 		_, err = os.ReadDir(target)
 		if err != nil {
-			klog.V(4).Infof("Error occurred at reading directory %q. Trying to Unmount.",target)
+			klog.V(4).Infof("Error occurred at reading directory %q. Trying to Unmount.", target)
 			//Reading the directory failed and trying to unmount and remount
 			if mntErr := d.mounter.Unmount(target); mntErr != nil {
 				return status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, err)
 			}
 		} else {
-			klog.V(4).Infof("Target path %q is already mounted",target)
+			klog.V(4).Infof("Target path %q is already mounted", target)
 			return nil
 		}
 	}
 
 	if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil {
 		return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
 	}
-
 	return nil
 }

From 25fdec30ee09ed8cd5c956680a7f392c36891a61 Mon Sep 17 00:00:00 2001
From: Nirmal Aagash Chandramohan
Date: Wed, 18 Aug 2021 02:31:28 +0000
Subject: [PATCH 11/14] Added Mount Idempotency for block device

---
 pkg/driver/mocks/mock_mount.go | 14 ++
 pkg/driver/mount.go | 9 +-
 pkg/driver/node.go | 89 +++++++-----
 pkg/driver/node_test.go | 251 ++++++++++++++++++++++++++++++++-
 pkg/driver/sanity_test.go | 4 +
 5 files changed, 325 insertions(+), 42 deletions(-)

diff --git a/pkg/driver/mocks/mock_mount.go b/pkg/driver/mocks/mock_mount.go
index eeb81d98c9..138d84f10a 100644
--- a/pkg/driver/mocks/mock_mount.go
+++ b/pkg/driver/mocks/mock_mount.go
@@ -104,6 +104,20 @@ func (mr *MockMounterMockRecorder) GetDeviceNameFromMount(mountPath interface{})
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceNameFromMount", reflect.TypeOf((*MockMounter)(nil).GetDeviceNameFromMount), mountPath)
 }
 
+// IsCorruptedMnt mocks base method.
+func (m *MockMounter) IsCorruptedMnt(err error) bool {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "IsCorruptedMnt", err)
+	ret0, _ := ret[0].(bool)
+	return ret0
+}
+
+// IsCorruptedMnt indicates an expected call of IsCorruptedMnt.
+func (mr *MockMounterMockRecorder) IsCorruptedMnt(err interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCorruptedMnt", reflect.TypeOf((*MockMounter)(nil).IsCorruptedMnt), err)
+}
+
 // GetMountRefs mocks base method.
 func (m *MockMounter) GetMountRefs(pathname string) ([]string, error) {
 	m.ctrl.T.Helper()
diff --git a/pkg/driver/mount.go b/pkg/driver/mount.go
index f1ce7b48c8..125384234a 100644
--- a/pkg/driver/mount.go
+++ b/pkg/driver/mount.go
@@ -18,11 +18,12 @@ package driver
 
 import (
 	"fmt"
-	"k8s.io/klog"
 	"os"
 	"strconv"
 	"strings"
 
+	"k8s.io/klog"
+
 	"github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/mounter"
 	mountutils "k8s.io/mount-utils"
 	utilexec "k8s.io/utils/exec"
@@ -38,6 +39,7 @@ type Mounter interface {
 	utilexec.Interface
 
 	// Implemented by NodeMounter below
+	IsCorruptedMnt(err error) bool
 	GetDeviceNameFromMount(mountPath string) (string, int, error)
 	// TODO this won't make sense on Windows with csi-proxy
 	MakeFile(path string) error
@@ -64,6 +66,11 @@ func (m NodeMounter) GetDeviceNameFromMount(mountPath string) (string, int, erro
 	return mountutils.GetDeviceNameFromMount(m, mountPath)
 }
 
+// IsCorruptedMnt return true if err is about corrupted mount point
+func (m NodeMounter) IsCorruptedMnt(err error) bool {
+	return mountutils.IsCorruptedMnt(err)
+}
+
 // This function is mirrored in ./sanity_test.go to make sure sanity test covered this block of code
 // Please mirror the change to func MakeFile in ./sanity_test.go
 func (m *NodeMounter) MakeFile(path string) error {
diff --git a/pkg/driver/node.go b/pkg/driver/node.go
index 680bccae2e..813a8e89c9 100644
--- a/pkg/driver/node.go
+++ b/pkg/driver/node.go
@@ -591,17 +591,57 @@ func (d *nodeService) nodePublishVolumeForBlock(req *csi.NodePublishVolumeReques
 		return status.Errorf(codes.Internal, "Could not create file %q: %v", target, err)
 	}
 
-	klog.V(4).Infof("NodePublishVolume [block]: mounting %s at %s", source, target)
-	if err := d.mounter.Mount(source, target, "", mountOptions); err != nil {
-		if removeErr := os.Remove(target); removeErr != nil {
-			return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr)
-		}
+	//Checking if the target file is already mounted with a device.
+	mounted, err := d.isMounted(source, target)
+	if err != nil {
 		return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
 	}
 
+	if !mounted {
+		klog.V(4).Infof("NodePublishVolume [block]: mounting %s at %s", source, target)
+		if err := d.mounter.Mount(source, target, "", mountOptions); err != nil {
+			if removeErr := os.Remove(target); removeErr != nil {
+				return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr)
+			}
+			return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
+		}
+	} else {
+		klog.V(4).Infof("NodePublishVolume [block]: Target path %q is already mounted", target)
+	}
+
 	return nil
 }
 
+func (d *nodeService) isMounted(source string, target string) (bool, error) {
+	/*
+		Checking if it's a mount point using IsLikelyNotMountPoint. There are three different return values,
+		1. true, err when the directory does not exist or corrupted.
+		2. false, nil when the path is already mounted with a device.
+		3. true, nil when the path is not mounted with any device.
+	*/
+	notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
+	if err != nil && !os.IsNotExist(err) {
+		//Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount.
+		_, pathErr := d.mounter.PathExists(target)
+		if pathErr != nil && d.mounter.IsCorruptedMnt(pathErr) {
+			klog.V(4).Infof("NodePublishVolume: Target path %q is a corrupted mount. Trying to unmount.", target)
+			if mntErr := d.mounter.Unmount(target); mntErr != nil {
+				return !notMnt, status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, mntErr)
+			}
+			//After successful unmount, the device is ready to be mounted.
+			return !notMnt, nil
+		}
+		return !notMnt, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
+	}
+
+	if !notMnt {
+		klog.V(4).Infof("NodePublishVolume: Target path %q is already mounted", target)
+		return !notMnt, nil
+	}
+
+	return !notMnt, err
+}
+
 func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeRequest, mountOptions []string, mode *csi.VolumeCapability_Mount) error {
 	target := req.GetTargetPath()
 	source := req.GetStagingTargetPath()
@@ -624,41 +664,20 @@ func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR
 	}
 
 	klog.V(4).Infof("NodePublishVolume: mounting %s at %s with option %s as fstype %s", source, target, mountOptions, fsType)
-	/*
-		Checking if it's a mount point. There are three cases,
-		1. true, err when the directory does not exist or corrupted.
-		2. false, nil when the path is already mounted with a device.
-		3. true, nil when the path is not mounted with any device.
-	*/
-	notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
-	if err != nil && !os.IsNotExist(err) {
-		//Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount.
-		_, pathErr := mountutils.PathExists(target)
-		if pathErr != nil && mountutils.IsCorruptedMnt(pathErr) {
-			klog.V(4).Infof("Target path %q is a corrupted directory", target)
-		} else {
-			return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
-		}
-	}
+
+	//Checking if the target directory is already mounted with a device.
+	mounted, err := d.isMounted(source, target)
+
+	if err != nil {
+		return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
+	}
 
-	if !notMnt {
-		//Return error when any of the directory inside is not readable which means that a device could be mounted.
-		_, err = os.ReadDir(target)
-		if err != nil {
-			klog.V(4).Infof("Error occurred at reading directory %q. 
Trying to Unmount.", target) - //Reading the directory failed and trying to unmount and remount - if mntErr := d.mounter.Unmount(target); mntErr != nil { - return status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, err) - } - } else { - klog.V(4).Infof("Target path %q is already mounted", target) - return nil + if !mounted { + if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil { + return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) } } - if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil { - return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) - } return nil } diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 01eb6d7fcb..a1f01a4492 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -597,8 +597,109 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success filesystem mounted already", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success filesystem mountpoint error", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, nil), + ) + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("Internal system error")) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + expectErr(t, err, codes.Internal) + }, + }, + { + name: "success filesystem corrupted 
mountpoint error", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, errors.New("CorruptedMntError")) + mockMounter.EXPECT().IsCorruptedMnt(gomock.Eq(errors.New("CorruptedMntError"))).Return(true) + + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("internal system error")) mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind"})).Return(nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -631,8 +732,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(FSTypeXfs), gomock.Eq([]string{"bind"})).Return(nil) - mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) - mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -674,8 +774,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "ro"})).Return(nil) - mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) - mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -709,8 +808,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "test-flag"})).Return(nil) - mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) - mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, @@ -761,6 +859,145 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) mockMounter.EXPECT().MakeFile(targetPath).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + 
Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success mounted already [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil), + mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil), + ) + mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) + mockMounter.EXPECT().MakeFile(targetPath).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success mountpoint error [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil), + mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil), + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, nil), + ) + + mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) + mockMounter.EXPECT().MakeFile(targetPath).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("Internal System Error")) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + expectErr(t, err, codes.Internal) + }, + }, + { + name: "success corrupted mountpoint error [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + 
mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil),
+				mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil),
+				mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, errors.New("CorruptedMntError")),
+			)
+
+			mockMounter.EXPECT().IsCorruptedMnt(errors.New("CorruptedMntError")).Return(true)
+
+			mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil)
+			mockMounter.EXPECT().MakeFile(targetPath).Return(nil)
+			mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
+			mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("Internal System Error"))
+			mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Any(), gomock.Any()).Return(nil)

 			req := &csi.NodePublishVolumeRequest{
 				PublishContext:    map[string]string{DevicePathKey: "/dev/fake"},
@@ -805,6 +1042,7 @@ func TestNodePublishVolume(t *testing.T) {
 			mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil)
 			mockMounter.EXPECT().MakeFile(targetPath).Return(nil)
 			mockMounter.EXPECT().Mount(gomock.Eq(devicePathWithPartition), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil)
+			mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)

 			req := &csi.NodePublishVolumeRequest{
 				PublishContext:    map[string]string{DevicePathKey: "/dev/fake"},
@@ -850,6 +1088,7 @@ func TestNodePublishVolume(t *testing.T) {
 			mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil)
 			mockMounter.EXPECT().MakeFile(targetPath).Return(nil)
 			mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil)
+			mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)

 			req := &csi.NodePublishVolumeRequest{
 				PublishContext:    map[string]string{DevicePathKey: "/dev/fake"},
diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go
index b8e8a3e2e3..fc28bdf4bc 100644
--- a/pkg/driver/sanity_test.go
+++ b/pkg/driver/sanity_test.go
@@ -293,6 +293,10 @@ func newFakeMounter() *fakeMounter {
 	}
 }

+func (f *fakeMounter) IsCorruptedMnt(err error) bool {
+	return false
+}
+
 func (f *fakeMounter) Mount(source string, target string, fstype string, options []string) error {
 	return nil
 }

From 76265af6dd7942f3657c3332b5889131882d380b Mon Sep 17 00:00:00 2001
From: Nirmal Aagash Chandramohan
Date: Wed, 25 Aug 2021 22:38:09 +0000
Subject: [PATCH 12/14] go format errors fixed using go v1.17

---
 pkg/cloud/aws_metrics.go            | 1 +
 pkg/driver/node_linux.go            | 1 +
 pkg/mounter/safe_mounter_unix.go    | 1 +
 pkg/mounter/safe_mounter_windows.go | 1 +
 4 files changed, 4 insertions(+)

diff --git a/pkg/cloud/aws_metrics.go b/pkg/cloud/aws_metrics.go
index 827f1d2298..5f48369abd 100644
--- a/pkg/cloud/aws_metrics.go
+++ b/pkg/cloud/aws_metrics.go
@@ -1,3 +1,4 @@
+//go:build !providerless
 // +build !providerless

 /*
diff --git a/pkg/driver/node_linux.go b/pkg/driver/node_linux.go
index 4c4a9d0955..dff94d277c 100644
--- a/pkg/driver/node_linux.go
+++ b/pkg/driver/node_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*
diff --git a/pkg/mounter/safe_mounter_unix.go b/pkg/mounter/safe_mounter_unix.go
index a72b544748..215047b001 100644
--- a/pkg/mounter/safe_mounter_unix.go
+++ b/pkg/mounter/safe_mounter_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
 // +build linux darwin

 /*
diff --git a/pkg/mounter/safe_mounter_windows.go b/pkg/mounter/safe_mounter_windows.go
index 35a3277d78..fd6b34f66b 100644
--- a/pkg/mounter/safe_mounter_windows.go
+++ 
b/pkg/mounter/safe_mounter_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* From 81871460904c38f0f32a0c7b11f91939213fd6d6 Mon Sep 17 00:00:00 2001 From: Varun Dhananjaya Date: Thu, 2 Sep 2021 16:35:41 -0700 Subject: [PATCH 13/14] release v1.1.4 --- CHANGELOG-0.x.md | 5 +++++ Makefile | 2 +- docs/README.md | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md index 4260e93f32..95f5f83c12 100644 --- a/CHANGELOG-0.x.md +++ b/CHANGELOG-0.x.md @@ -1,3 +1,8 @@ +# v1.1.4 + +## Notable changes +- Fix mount idempotency ([#1019](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1019), [@nirmalaagash](https://github.com/nirmalaagash)) + # v1.1.3 ## Notable changes diff --git a/Makefile b/Makefile index a259f8673d..229ff5bdab 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ PKG=github.com/kubernetes-sigs/aws-ebs-csi-driver IMAGE?=amazon/aws-ebs-csi-driver -VERSION=v1.1.3 +VERSION=v1.1.4 VERSION_AMAZONLINUX=$(VERSION)-amazonlinux GIT_COMMIT?=$(shell git rev-parse HEAD) BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") diff --git a/docs/README.md b/docs/README.md index 19b3119bfa..3811c501aa 100644 --- a/docs/README.md +++ b/docs/README.md @@ -96,6 +96,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll ## Container Images: |AWS EBS CSI Driver Version | Image | |---------------------------|--------------------------------------------------| +|v1.1.4 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.4 | |v1.1.3 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3 | |v1.1.2 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2 | |v1.1.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1 | From 7d42839802e83a6fcc47546fbf4518250a011fc0 Mon Sep 17 00:00:00 2001 From: Matthew Wong Date: Wed, 8 Sep 2021 18:40:08 -0700 Subject: [PATCH 14/14] Increase prow timeout to 1h15m --- cloudbuild.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudbuild.yaml b/cloudbuild.yaml index b41170e10a..46dfc9190e 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -1,4 +1,4 @@ -timeout: 3600s +timeout: 4500s steps: - name: gcr.io/k8s-testimages/gcb-docker-gcloud:v20200421-a2bf5f8 entrypoint: ./hack/prow.sh
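
Note on PATCH 11 (the mount-idempotency fix released above as v1.1.4): the patch
makes NodePublishVolume safe to retry by checking whether the target is already a
healthy mount point before calling Mount, and by unmounting first when the mount
point is corrupted. The standalone Go sketch below is a minimal, hedged
illustration of that flow — ensureMounted and the example paths are hypothetical
names for this note, not the driver's API — built only on k8s.io/mount-utils
calls the patch itself uses (IsLikelyNotMountPoint, PathExists, IsCorruptedMnt,
Unmount, Mount):

package main

import (
	"fmt"
	"os"

	mountutils "k8s.io/mount-utils"
)

// ensureMounted mounts source on target only when target is not already a
// healthy mount point, mirroring the isMounted + conditional-Mount flow.
func ensureMounted(m mountutils.Interface, source, target, fsType string, opts []string) error {
	notMnt, err := m.IsLikelyNotMountPoint(target)
	if err != nil && !os.IsNotExist(err) {
		// The check failed outright; see whether the target is a corrupted mount.
		if _, pathErr := mountutils.PathExists(target); pathErr != nil && mountutils.IsCorruptedMnt(pathErr) {
			// Corrupted mount: unmount so the Mount below can succeed.
			if mntErr := m.Unmount(target); mntErr != nil {
				return fmt.Errorf("unable to unmount %q: %v", target, mntErr)
			}
			notMnt = true // target is now free to be mounted again
		} else {
			return fmt.Errorf("could not check mount point %q: %v", target, err)
		}
	}
	if !notMnt {
		// Already mounted: return success without remounting (idempotency).
		return nil
	}
	return m.Mount(source, target, fsType, opts)
}

func main() {
	m := mountutils.New("") // default exec-based mounter
	// Hypothetical example values; the driver passes the CSI staging/target paths.
	_ = ensureMounted(m, "/dev/xvdba", "/mnt/example-target", "ext4", []string{"bind"})
}

This is why the new tests expect no Mount call when IsLikelyNotMountPoint returns
(false, nil): a retry after a kubelet restart or a corrupted mount now succeeds
instead of failing on the second publish.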
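For context on PATCH 12: Go 1.17's gofmt writes the new //go:build constraint
line alongside the old // +build comment, and keeping both forms lets the same
file build with toolchains both before and after Go 1.17. A minimal sketch of
the paired constraints — a hypothetical file mirroring safe_mounter_unix.go;
both lines must sit above the package clause, separated from it by a blank line:

//go:build linux || darwin
// +build linux darwin

// Package mounter compiles only on Linux and Darwin; the duplicated
// constraint keeps pre-1.17 toolchains honoring the same build tags.
package mounter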