diff --git a/CHANGELOG-0.x.md b/CHANGELOG-0.x.md index efbb686385..95f5f83c12 100644 --- a/CHANGELOG-0.x.md +++ b/CHANGELOG-0.x.md @@ -1,3 +1,53 @@ +# v1.1.4 + +## Notable changes +- Fix mount idempotency ([#1019](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1019), [@nirmalaagash](https://github.com/nirmalaagash)) + +# v1.1.3 + +## Notable changes +- Fix ecr image being debian-based +- In a future release, the debian-based image will be removed and only an al2-based image will be maintained and pushed to GCR and ECR +- In a future release, images will stop getting pushed to Docker Hub + +# v1.1.2 + +## Notable changes +- Update base images: yum update al2, bump debian tag ([#986](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/986), [@wongma7](https://github.com/wongma7)) + +# v1.1.1 + +### Bug fixes +- update inFlight cache to avoid race condition on volume operation ([#924](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/924), [@AndyXiangLi](https://github.com/AndyXiangLi)) + +# v1.1.0 + +## Notable changes +- Helm chart cleaned up ([#856](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/856), [@krmichel](https://github.com/krmichel)) + +### New features +* Add podAnnotations to snapshotController StatefulSet ([#884](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/884), [@snstanton](https://github.com/snstanton)) +* Support custom pod labels in Helm chart ([#905](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/905), [@eytanhanig](https://github.com/eytanhanig)) + +### Bug fixes +* fix naming mistake in clusterrolebinding, expose env var to controller via downward api ([#874](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/874), [@vdhanan](https://github.com/vdhanan)) +* Fix kustomize RBAC bindings to have namespace kube-system ([#878](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/878), [@wongma7](https://github.com/wongma7)) +* rename node clusterrolebinding to make auto upgrade work ([#894](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/894), [@vdhanan](https://github.com/vdhanan)) +* remove hardcoded namespace for pod disruption budget ([#895](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/895), [@vdhanan](https://github.com/vdhanan)) +* Only initialize the in-cluster kube client when metadata service is actually unavailable ([#897](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/897), [@chrisayoub](https://github.com/chrisayoub)) +* Reduce default log level to 2 ([#903](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/903), [@wongma7](https://github.com/wongma7)) +* Add pod disruption budgets that got missed in a rebase ([#906](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/906), [@krmichel](https://github.com/krmichel)) +* remove WellKnownTopologyKey from PV Topology ([#912](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/912), [@Elbehery](https://github.com/Elbehery)) +* Skip volume expansion if block node ([#916](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/916), [@gnufied](https://github.com/gnufied)) + +### Misc.
+* Add eksctl support to e2e scripts ([#852](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/852), [@wongma7](https://github.com/wongma7)) +* release v1.0.0 ([#865](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/865), [@vdhanan](https://github.com/vdhanan)) +* add self as owner ([#866](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/866), [@vdhanan](https://github.com/vdhanan)) +* bump helm chart version ([#881](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/881), [@vdhanan](https://github.com/vdhanan)) +* add custom useragent suffix ([#910](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/910), [@vdhanan](https://github.com/vdhanan)) +* Bump chart-releaser-action to v1.2.1 ([#914](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/914), [@gliptak](https://github.com/gliptak)) + # v1.0.0 ## Notable changes - With this release, the EBS CSI Driver is now Generally Available! diff --git a/Dockerfile b/Dockerfile index aa6464e644..e077e3e6ae 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,12 +18,14 @@ COPY . . RUN make FROM amazonlinux:2 AS amazonlinux +RUN yum update -y RUN yum install ca-certificates e2fsprogs xfsprogs util-linux -y +RUN yum clean all COPY --from=builder /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver/bin/aws-ebs-csi-driver /bin/aws-ebs-csi-driver ENTRYPOINT ["/bin/aws-ebs-csi-driver"] -FROM k8s.gcr.io/build-image/debian-base:v2.1.3 AS debian-base +FROM k8s.gcr.io/build-image/debian-base:buster-v1.8.0 AS debian-base RUN clean-install ca-certificates e2fsprogs mount udev util-linux xfsprogs COPY --from=builder /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver/bin/aws-ebs-csi-driver /bin/aws-ebs-csi-driver diff --git a/Makefile b/Makefile index 61a014d411..229ff5bdab 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ PKG=github.com/kubernetes-sigs/aws-ebs-csi-driver IMAGE?=amazon/aws-ebs-csi-driver -VERSION=v1.0.0 +VERSION=v1.1.4 VERSION_AMAZONLINUX=$(VERSION)-amazonlinux GIT_COMMIT?=$(shell git rev-parse HEAD) BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") @@ -108,6 +108,10 @@ test-e2e-external: GINKGO_SKIP="\[Disruptive\]|\[Serial\]" \ ./hack/e2e/run.sh +.PHONY: test-e2e-external-eks +test-e2e-external-eks: + echo TODO + .PHONY: image-release image-release: docker build -t $(IMAGE):$(VERSION) . --target debian-base @@ -146,19 +150,19 @@ generate-kustomize: bin/helm cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-resizer.yaml > ../../deploy/kubernetes/base/clusterrole-resizer.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-snapshot-controller.yaml > ../../deploy/kubernetes/base/clusterrole-snapshot-controller.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrole-snapshotter.yaml > ../../deploy/kubernetes/base/clusterrole-snapshotter.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-attacher.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-attacher.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-csi-node.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-csi-node.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . 
-s templates/clusterrolebinding-provisioner.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-provisioner.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-resizer.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-resizer.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshot-controller.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshotter.yaml -n kube-system > ../../deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-attacher.yaml > ../../deploy/kubernetes/base/clusterrolebinding-attacher.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-csi-node.yaml > ../../deploy/kubernetes/base/clusterrolebinding-csi-node.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-provisioner.yaml > ../../deploy/kubernetes/base/clusterrolebinding-provisioner.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-resizer.yaml > ../../deploy/kubernetes/base/clusterrolebinding-resizer.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshot-controller.yaml > ../../deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/clusterrolebinding-snapshotter.yaml > ../../deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/controller.yaml > ../../deploy/kubernetes/base/controller.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/csidriver.yaml > ../../deploy/kubernetes/base/csidriver.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/node.yaml > ../../deploy/kubernetes/base/node.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/poddisruptionbudget-controller.yaml > ../../deploy/kubernetes/base/poddisruptionbudget-controller.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/poddisruptionbudget-snapshot-controller.yaml -f ../../deploy/kubernetes/values/snapshotter.yaml > ../../deploy/kubernetes/base/poddisruptionbudget-snapshot-controller.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/role-snapshot-controller-leaderelection.yaml -n kube-system > ../../deploy/kubernetes/base/role-snapshot-controller-leaderelection.yaml - cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/rolebinding-snapshot-controller-leaderelection.yaml -n kube-system > ../../deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/role-snapshot-controller-leaderelection.yaml > ../../deploy/kubernetes/base/role-snapshot-controller-leaderelection.yaml + cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . 
-s templates/rolebinding-snapshot-controller-leaderelection.yaml > ../../deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-csi-controller.yaml > ../../deploy/kubernetes/base/serviceaccount-csi-controller.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-csi-node.yaml > ../../deploy/kubernetes/base/serviceaccount-csi-node.yaml cd charts/aws-ebs-csi-driver && ../../bin/helm template kustomize . -s templates/serviceaccount-snapshot-controller.yaml > ../../deploy/kubernetes/base/serviceaccount-snapshot-controller.yaml diff --git a/charts/aws-ebs-csi-driver/CHANGELOG.md b/charts/aws-ebs-csi-driver/CHANGELOG.md new file mode 100644 index 0000000000..b82cf8631f --- /dev/null +++ b/charts/aws-ebs-csi-driver/CHANGELOG.md @@ -0,0 +1,6 @@ +# Helm chart + +# v1.2.4 +* Bump app/driver version to `v1.1.1` +* Install VolumeSnapshotClass, VolumeSnapshotContent, VolumeSnapshot CRDs if enableVolumeSnapshot is true +* Only run csi-snapshotter sidecar if enableVolumeSnapshot is true or if CRDs are already installed diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml index 5897f3ba77..d874536c0b 100644 --- a/charts/aws-ebs-csi-driver/Chart.yaml +++ b/charts/aws-ebs-csi-driver/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 -appVersion: "1.0.0" +appVersion: "1.1.3" name: aws-ebs-csi-driver description: A Helm chart for AWS EBS CSI Driver -version: 1.2.0 +version: 1.2.4 kubeVersion: ">=1.17.0-0" home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver sources: diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml index 6ca9a9cff8..2a6644cf87 100644 --- a/charts/aws-ebs-csi-driver/values.yaml +++ b/charts/aws-ebs-csi-driver/values.yaml @@ -4,7 +4,7 @@ image: repository: k8s.gcr.io/provider-aws/aws-ebs-csi-driver - tag: "v1.0.0" + tag: "v1.1.3" pullPolicy: IfNotPresent sidecars: @@ -124,7 +124,6 @@ controller: # whenUnsatisfiable: ScheduleAnyway topologySpreadConstraints: [] - # Moving to values under node # The "maximum number of attachable volumes" per node volumeAttachLimit: diff --git a/cloudbuild.yaml b/cloudbuild.yaml index b41170e10a..46dfc9190e 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -1,4 +1,4 @@ -timeout: 3600s +timeout: 4500s steps: - name: gcr.io/k8s-testimages/gcb-docker-gcloud:v20200421-a2bf5f8 entrypoint: ./hack/prow.sh diff --git a/deploy/kubernetes/base/clusterrolebinding-attacher.yaml b/deploy/kubernetes/base/clusterrolebinding-attacher.yaml index 9a97b8efcb..5715d2651b 100644 --- a/deploy/kubernetes/base/clusterrolebinding-attacher.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-attacher.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-csi-controller-sa - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-external-attacher-role diff --git a/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml b/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml index d1292b132d..095db52510 100644 --- a/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-csi-node.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-csi-node-sa - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-csi-node-role diff --git a/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml 
b/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml index 084bed9df9..3544bc61e2 100644 --- a/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-provisioner.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-csi-controller-sa - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-external-provisioner-role diff --git a/deploy/kubernetes/base/clusterrolebinding-resizer.yaml b/deploy/kubernetes/base/clusterrolebinding-resizer.yaml index a840f51b83..c80a9a26bf 100644 --- a/deploy/kubernetes/base/clusterrolebinding-resizer.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-resizer.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-csi-controller-sa - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-external-resizer-role diff --git a/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml b/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml index 10c021c889..6d7cbec1b3 100644 --- a/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-snapshot-controller.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-snapshot-controller - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-snapshot-controller-role diff --git a/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml b/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml index 7720ca5d23..7946414d59 100644 --- a/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml +++ b/deploy/kubernetes/base/clusterrolebinding-snapshotter.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-csi-controller-sa - namespace: kube-system + namespace: default roleRef: kind: ClusterRole name: ebs-external-snapshotter-role diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml index d38e1fdc90..972a9232bf 100644 --- a/deploy/kubernetes/base/controller.yaml +++ b/deploy/kubernetes/base/controller.yaml @@ -31,7 +31,7 @@ spec: tolerationSeconds: 300 containers: - name: ebs-plugin - image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 + image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3 imagePullPolicy: IfNotPresent args: # - {all,controller,node} # specify the driver mode diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml index e59bd43f38..2d129d5d7d 100644 --- a/deploy/kubernetes/base/node.yaml +++ b/deploy/kubernetes/base/node.yaml @@ -42,7 +42,7 @@ spec: - name: ebs-plugin securityContext: privileged: true - image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 + image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3 args: - node - --endpoint=$(CSI_ENDPOINT) diff --git a/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml b/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml index 9d66443b9a..fd9ab78b4b 100644 --- a/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml +++ b/deploy/kubernetes/base/rolebinding-snapshot-controller-leaderelection.yaml @@ -9,7 +9,7 @@ metadata: subjects: - kind: ServiceAccount name: ebs-snapshot-controller - namespace: kube-system + namespace: default roleRef: kind: Role name: ebs-snapshot-controller-leaderelection diff --git a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml index 
d36d7ca71e..356973c067 100644 --- a/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml +++ b/deploy/kubernetes/overlays/stable/ecr/kustomization.yaml @@ -5,7 +5,7 @@ bases: images: - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver newName: 602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-ebs-csi-driver - newTag: v1.0.0 + newTag: v1.1.3 - name: k8s.gcr.io/sig-storage/csi-provisioner newName: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner newTag: v2.1.1-eks-1-18-3 diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml index c42cdc388d..a5002a8bc4 100644 --- a/deploy/kubernetes/overlays/stable/kustomization.yaml +++ b/deploy/kubernetes/overlays/stable/kustomization.yaml @@ -4,7 +4,7 @@ bases: - ../../base images: - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver - newTag: v1.0.0 + newTag: v1.1.3 - name: k8s.gcr.io/sig-storage/csi-provisioner newTag: v2.1.1 - name: k8s.gcr.io/sig-storage/csi-attacher diff --git a/docs/README.md b/docs/README.md index 42dbcf4d0c..3811c501aa 100644 --- a/docs/README.md +++ b/docs/README.md @@ -12,6 +12,7 @@ The [Amazon Elastic Block Store](https://aws.amazon.com/ebs/) Container Storage | AWS EBS CSI Driver \ CSI Version | v0.3.0| v1.0.0 | v1.1.0 | |----------------------------------------|-------|--------|--------| | master branch | no | no | yes | +| v1.1.x | no | no | yes | | v1.0.0 | no | no | yes | | v0.10.x | no | no | yes | | v0.9.x | no | no | yes | @@ -77,6 +78,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll | AWS EBS CSI Driver \ Kubernetes Version| v1.12 | v1.13 | v1.14 | v1.15 | v1.16 | v1.17 | v1.18+ | |----------------------------------------|-------|-------|-------|-------|-------|-------|-------| | master branch | no | no+ | no | no | no | yes | yes | +| v1.1.0 | no | no+ | no | no | no | yes | yes | | v1.0.0 | no | no+ | no | no | no | yes | yes | | v0.10.x | no | no+ | no | no | no | yes | yes | | v0.9.x | no | no+ | no | no | no | yes | yes | @@ -94,7 +96,11 @@ Following sections are Kubernetes specific. 
If you are Kubernetes user, use foll ## Container Images: |AWS EBS CSI Driver Version | Image | |---------------------------|--------------------------------------------------| -|master branch |amazon/aws-ebs-csi-driver:latest | +|v1.1.4 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.4 | +|v1.1.3 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.3 | +|v1.1.2 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.2 | +|v1.1.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.1 | +|v1.1.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.1.0 | |v1.0.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v1.0.0 | |v0.10.1 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.10.1| |v0.10.0 |k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.10.0| diff --git a/pkg/cloud/aws_metrics.go b/pkg/cloud/aws_metrics.go index 827f1d2298..5f48369abd 100644 --- a/pkg/cloud/aws_metrics.go +++ b/pkg/cloud/aws_metrics.go @@ -1,3 +1,4 @@ +//go:build !providerless // +build !providerless /* diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 03a550deff..21f37eb97f 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -99,27 +99,21 @@ func newControllerService(driverOptions *DriverOptions) controllerService { func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { klog.V(4).Infof("CreateVolume: called with args %+v", *req) - volName := req.GetName() - if len(volName) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume name not provided") + if err := validateCreateVolumeRequest(req); err != nil { + return nil, err } - volSizeBytes, err := getVolSizeBytes(req) if err != nil { return nil, err } + volName := req.GetName() - volCaps := req.GetVolumeCapabilities() - if len(volCaps) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided") - } - - if !isValidVolumeCapabilities(volCaps) { - modes := util.GetAccessModes(volCaps) - stringModes := strings.Join(*modes, ", ") - errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported." 
- return nil, status.Error(codes.InvalidArgument, errString) + // check if a request is already in-flight + if ok := d.inFlight.Insert(volName); !ok { + msg := fmt.Sprintf("Create volume request for %s is already in progress", volName) + return nil, status.Error(codes.Aborted, msg) } + defer d.inFlight.Delete(volName) disk, err := d.cloud.GetDiskByName(ctx, volName, volSizeBytes) if err != nil { @@ -217,13 +211,6 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol return newCreateVolumeResponse(disk), nil } - // check if a request is already in-flight because the CreateVolume API is not idempotent - if ok := d.inFlight.Insert(req.String()); !ok { - msg := fmt.Sprintf("Create volume request for %s is already in progress", volName) - return nil, status.Error(codes.Aborted, msg) - } - defer d.inFlight.Delete(req.String()) - // create a new volume zone := pickAvailabilityZone(req.GetAccessibilityRequirements()) outpostArn := getOutpostArn(req.GetAccessibilityRequirements()) @@ -264,12 +251,40 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol return newCreateVolumeResponse(disk), nil } +func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error { + volName := req.GetName() + if len(volName) == 0 { + return status.Error(codes.InvalidArgument, "Volume name not provided") + } + + volCaps := req.GetVolumeCapabilities() + if len(volCaps) == 0 { + return status.Error(codes.InvalidArgument, "Volume capabilities not provided") + } + + if !isValidVolumeCapabilities(volCaps) { + modes := util.GetAccessModes(volCaps) + stringModes := strings.Join(*modes, ", ") + errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported." + return status.Error(codes.InvalidArgument, errString) + } + return nil +} + func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { klog.V(4).Infof("DeleteVolume: called with args: %+v", *req) + if err := validateDeleteVolumeRequest(req); err != nil { + return nil, err + } + volumeID := req.GetVolumeId() - if len(volumeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + + // check if a request is already in-flight + if ok := d.inFlight.Insert(volumeID); !ok { + msg := fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, volumeID) + return nil, status.Error(codes.Aborted, msg) } + defer d.inFlight.Delete(volumeID) if _, err := d.cloud.DeleteDisk(ctx, volumeID); err != nil { if err == cloud.ErrNotFound { @@ -282,30 +297,21 @@ func (d *controllerService) DeleteVolume(ctx context.Context, req *csi.DeleteVol return &csi.DeleteVolumeResponse{}, nil } +func validateDeleteVolumeRequest(req *csi.DeleteVolumeRequest) error { + if len(req.GetVolumeId()) == 0 { + return status.Error(codes.InvalidArgument, "Volume ID not provided") + } + return nil +} + func (d *controllerService) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { klog.V(4).Infof("ControllerPublishVolume: called with args %+v", *req) - volumeID := req.GetVolumeId() - if len(volumeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + if err := validateControllerPublishVolumeRequest(req); err != nil { + return nil, err } + volumeID := req.GetVolumeId() nodeID := req.GetNodeId() - if len(nodeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Node ID not provided") - } 
- - volCap := req.GetVolumeCapability() - if volCap == nil { - return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") - } - - caps := []*csi.VolumeCapability{volCap} - if !isValidVolumeCapabilities(caps) { - modes := util.GetAccessModes(caps) - stringModes := strings.Join(*modes, ", ") - errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported." - return nil, status.Error(codes.InvalidArgument, errString) - } if !d.cloud.IsExistInstance(ctx, nodeID) { return nil, status.Errorf(codes.NotFound, "Instance %q not found", nodeID) @@ -333,17 +339,38 @@ func (d *controllerService) ControllerPublishVolume(ctx context.Context, req *cs return &csi.ControllerPublishVolumeResponse{PublishContext: pvInfo}, nil } +func validateControllerPublishVolumeRequest(req *csi.ControllerPublishVolumeRequest) error { + if len(req.GetVolumeId()) == 0 { + return status.Error(codes.InvalidArgument, "Volume ID not provided") + } + + if len(req.GetNodeId()) == 0 { + return status.Error(codes.InvalidArgument, "Node ID not provided") + } + + volCap := req.GetVolumeCapability() + if volCap == nil { + return status.Error(codes.InvalidArgument, "Volume capability not provided") + } + + caps := []*csi.VolumeCapability{volCap} + if !isValidVolumeCapabilities(caps) { + modes := util.GetAccessModes(caps) + stringModes := strings.Join(*modes, ", ") + errString := "Volume capabilities " + stringModes + " not supported. Only AccessModes[ReadWriteOnce] supported." + return status.Error(codes.InvalidArgument, errString) + } + return nil +} + func (d *controllerService) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { klog.V(4).Infof("ControllerUnpublishVolume: called with args %+v", *req) - volumeID := req.GetVolumeId() - if len(volumeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + if err := validateControllerUnpublishVolumeRequest(req); err != nil { + return nil, err } + volumeID := req.GetVolumeId() nodeID := req.GetNodeId() - if len(nodeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Node ID not provided") - } if err := d.cloud.DetachDisk(ctx, volumeID, nodeID); err != nil { if err == cloud.ErrNotFound { @@ -356,6 +383,18 @@ func (d *controllerService) ControllerUnpublishVolume(ctx context.Context, req * return &csi.ControllerUnpublishVolumeResponse{}, nil } +func validateControllerUnpublishVolumeRequest(req *csi.ControllerUnpublishVolumeRequest) error { + if len(req.GetVolumeId()) == 0 { + return status.Error(codes.InvalidArgument, "Volume ID not provided") + } + + if len(req.GetNodeId()) == 0 { + return status.Error(codes.InvalidArgument, "Node ID not provided") + } + + return nil +} + func (d *controllerService) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { klog.V(4).Infof("ControllerGetCapabilities: called with args %+v", *req) var caps []*csi.ControllerServiceCapability @@ -489,15 +528,20 @@ func isValidVolumeContext(volContext map[string]string) bool { func (d *controllerService) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { klog.V(4).Infof("CreateSnapshot: called with args %+v", req) - snapshotName := req.GetName() - if len(snapshotName) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot name not provided") + if err := 
validateCreateSnapshotRequest(req); err != nil { + return nil, err } + snapshotName := req.GetName() volumeID := req.GetSourceVolumeId() - if len(volumeID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot volume source ID not provided") + + // check if a request is already in-flight + if ok := d.inFlight.Insert(snapshotName); !ok { + msg := fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, snapshotName) + return nil, status.Error(codes.Aborted, msg) } + defer d.inFlight.Delete(snapshotName) + snapshot, err := d.cloud.GetSnapshotByName(ctx, snapshotName) if err != nil && err != cloud.ErrNotFound { klog.Errorf("Error looking for the snapshot %s: %v", snapshotName, err) @@ -535,12 +579,31 @@ func (d *controllerService) CreateSnapshot(ctx context.Context, req *csi.CreateS return newCreateSnapshotResponse(snapshot) } +func validateCreateSnapshotRequest(req *csi.CreateSnapshotRequest) error { + if len(req.GetName()) == 0 { + return status.Error(codes.InvalidArgument, "Snapshot name not provided") + } + + if len(req.GetSourceVolumeId()) == 0 { + return status.Error(codes.InvalidArgument, "Snapshot volume source ID not provided") + } + return nil +} + func (d *controllerService) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { klog.V(4).Infof("DeleteSnapshot: called with args %+v", req) + if err := validateDeleteSnapshotRequest(req); err != nil { + return nil, err + } + snapshotID := req.GetSnapshotId() - if len(snapshotID) == 0 { - return nil, status.Error(codes.InvalidArgument, "Snapshot ID not provided") + + // check if a request is already in-flight + if ok := d.inFlight.Insert(snapshotID); !ok { + msg := fmt.Sprintf("DeleteSnapshot for Snapshot %s is already in progress", snapshotID) + return nil, status.Error(codes.Aborted, msg) } + defer d.inFlight.Delete(snapshotID) if _, err := d.cloud.DeleteSnapshot(ctx, snapshotID); err != nil { if err == cloud.ErrNotFound { @@ -553,6 +616,13 @@ func (d *controllerService) DeleteSnapshot(ctx context.Context, req *csi.DeleteS return &csi.DeleteSnapshotResponse{}, nil } +func validateDeleteSnapshotRequest(req *csi.DeleteSnapshotRequest) error { + if len(req.GetSnapshotId()) == 0 { + return status.Error(codes.InvalidArgument, "Snapshot ID not provided") + } + return nil +} + func (d *controllerService) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { klog.V(4).Infof("ListSnapshots: called with args %+v", req) var snapshots []*cloud.Snapshot diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index 84d78db1b0..ca2f96115a 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -1536,11 +1536,10 @@ func TestCreateVolume(t *testing.T) { defer mockCtl.Finish() mockCloud := mocks.NewMockCloud(mockCtl) - mockCloud.EXPECT().GetDiskByName(gomock.Eq(ctx), gomock.Eq(req.Name), gomock.Eq(stdVolSize)).Return(nil, cloud.ErrNotFound) inFlight := internal.NewInFlight() - inFlight.Insert(req.String()) - defer inFlight.Delete(req.String()) + inFlight.Insert(req.GetName()) + defer inFlight.Delete(req.GetName()) awsDriver := controllerService{ cloud: mockCloud, @@ -1549,17 +1548,8 @@ func TestCreateVolume(t *testing.T) { } _, err := awsDriver.CreateVolume(ctx, req) - if err == nil { - t.Fatalf("Expected CreateVolume to fail but got no error") - } - srvErr, ok := status.FromError(err) - if !ok { - t.Fatalf("Could not get error status code from error: %v", srvErr) - } - if srvErr.Code() != 
codes.Aborted { - t.Fatalf("Expected Aborted but got: %s", srvErr.Code()) - } + checkAbortedErrorCode(t, err) }, }, { @@ -1714,6 +1704,31 @@ func TestDeleteVolume(t *testing.T) { } }, }, + { + name: "fail another request already in-flight", + testFunc: func(t *testing.T) { + req := &csi.DeleteVolumeRequest{ + VolumeId: "vol-test", + } + + ctx := context.Background() + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockCloud := mocks.NewMockCloud(mockCtl) + inFlight := internal.NewInFlight() + inFlight.Insert(req.GetVolumeId()) + defer inFlight.Delete(req.GetVolumeId()) + awsDriver := controllerService{ + cloud: mockCloud, + inFlight: inFlight, + driverOptions: &DriverOptions{}, + } + _, err := awsDriver.DeleteVolume(ctx, req) + + checkAbortedErrorCode(t, err) + }, + }, } for _, tc := range testCases { @@ -2259,6 +2274,34 @@ func TestCreateSnapshot(t *testing.T) { } }, }, + { + name: "fail with another request in-flight", + testFunc: func(t *testing.T) { + req := &csi.CreateSnapshotRequest{ + Name: "test-snapshot", + Parameters: nil, + SourceVolumeId: "vol-test", + } + + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockCloud := mocks.NewMockCloud(mockCtl) + + inFlight := internal.NewInFlight() + inFlight.Insert(req.GetName()) + defer inFlight.Delete(req.GetName()) + + awsDriver := controllerService{ + cloud: mockCloud, + inFlight: inFlight, + driverOptions: &DriverOptions{}, + } + _, err := awsDriver.CreateSnapshot(context.Background(), req) + + checkAbortedErrorCode(t, err) + }, + }, } for _, tc := range testCases { @@ -2321,6 +2364,34 @@ func TestDeleteSnapshot(t *testing.T) { } }, }, + { + name: "fail with another request in-flight", + testFunc: func(t *testing.T) { + ctx := context.Background() + + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockCloud := mocks.NewMockCloud(mockCtl) + + req := &csi.DeleteSnapshotRequest{ + SnapshotId: "test-snapshotID", + } + inFlight := internal.NewInFlight() + inFlight.Insert(req.GetSnapshotId()) + defer inFlight.Delete(req.GetSnapshotId()) + + awsDriver := controllerService{ + cloud: mockCloud, + inFlight: inFlight, + driverOptions: &DriverOptions{}, + } + + _, err := awsDriver.DeleteSnapshot(ctx, req) + + checkAbortedErrorCode(t, err) + }, + }, } for _, tc := range testCases { @@ -3082,3 +3153,17 @@ func TestControllerExpandVolume(t *testing.T) { }) } } + +func checkAbortedErrorCode(t *testing.T, err error) { + if err == nil { + t.Fatalf("Expected operation to fail but got no error") + } + + srvErr, ok := status.FromError(err) + if !ok { + t.Fatalf("Could not get error status code from error: %v", srvErr) + } + if srvErr.Code() != codes.Aborted { + t.Fatalf("Expected Aborted but got: %s", srvErr.Code()) + } +} diff --git a/pkg/driver/internal/inflight.go b/pkg/driver/internal/inflight.go index 5f0d2a9ad7..9b45680fbc 100644 --- a/pkg/driver/internal/inflight.go +++ b/pkg/driver/internal/inflight.go @@ -30,6 +30,10 @@ type Idempotent interface { String() string } +const ( + VolumeOperationAlreadyExistsErrorMsg = "An operation with the given Volume %s already exists" +) + // InFlight is a struct used to manage in flight requests per volumeId. 
type InFlight struct { mux *sync.Mutex diff --git a/pkg/driver/mocks/mock_mount.go b/pkg/driver/mocks/mock_mount.go index eeb81d98c9..138d84f10a 100644 --- a/pkg/driver/mocks/mock_mount.go +++ b/pkg/driver/mocks/mock_mount.go @@ -104,6 +104,20 @@ func (mr *MockMounterMockRecorder) GetDeviceNameFromMount(mountPath interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceNameFromMount", reflect.TypeOf((*MockMounter)(nil).GetDeviceNameFromMount), mountPath) } +// IsCorruptedMnt mocks base method. +func (m *MockMounter) IsCorruptedMnt(err error) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsCorruptedMnt", err) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsCorruptedMnt indicates an expected call of IsCorruptedMnt. +func (mr *MockMounterMockRecorder) IsCorruptedMnt(err interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCorruptedMnt", reflect.TypeOf((*MockMounter)(nil).IsCorruptedMnt), err) +} + // GetMountRefs mocks base method. func (m *MockMounter) GetMountRefs(pathname string) ([]string, error) { m.ctrl.T.Helper() diff --git a/pkg/driver/mount.go b/pkg/driver/mount.go index f1ce7b48c8..125384234a 100644 --- a/pkg/driver/mount.go +++ b/pkg/driver/mount.go @@ -18,11 +18,12 @@ package driver import ( "fmt" - "k8s.io/klog" "os" "strconv" "strings" + "k8s.io/klog" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/mounter" mountutils "k8s.io/mount-utils" utilexec "k8s.io/utils/exec" @@ -38,6 +39,7 @@ type Mounter interface { utilexec.Interface // Implemented by NodeMounter below + IsCorruptedMnt(err error) bool GetDeviceNameFromMount(mountPath string) (string, int, error) // TODO this won't make sense on Windows with csi-proxy MakeFile(path string) error @@ -64,6 +66,11 @@ func (m NodeMounter) GetDeviceNameFromMount(mountPath string) (string, int, erro return mountutils.GetDeviceNameFromMount(m, mountPath) } +// IsCorruptedMnt return true if err is about corrupted mount point +func (m NodeMounter) IsCorruptedMnt(err error) bool { + return mountutils.IsCorruptedMnt(err) +} + // This function is mirrored in ./sanity_test.go to make sure sanity test covered this block of code // Please mirror the change to func MakeFile in ./sanity_test.go func (m *NodeMounter) MakeFile(path string) error { diff --git a/pkg/driver/node.go b/pkg/driver/node.go index bb8e728189..813a8e89c9 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -591,17 +591,57 @@ func (d *nodeService) nodePublishVolumeForBlock(req *csi.NodePublishVolumeReques return status.Errorf(codes.Internal, "Could not create file %q: %v", target, err) } - klog.V(4).Infof("NodePublishVolume [block]: mounting %s at %s", source, target) - if err := d.mounter.Mount(source, target, "", mountOptions); err != nil { - if removeErr := os.Remove(target); removeErr != nil { - return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr) - } + //Checking if the target file is already mounted with a device. 
+ mounted, err := d.isMounted(source, target) + if err != nil { return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) } + if !mounted { + klog.V(4).Infof("NodePublishVolume [block]: mounting %s at %s", source, target) + if err := d.mounter.Mount(source, target, "", mountOptions); err != nil { + if removeErr := os.Remove(target); removeErr != nil { + return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr) + } + return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) + } + } else { + klog.V(4).Infof("NodePublishVolume [block]: Target path %q is already mounted", target) + } + return nil } +func (d *nodeService) isMounted(source string, target string) (bool, error) { + /* + Checking if it's a mount point using IsLikelyNotMountPoint. There are three different return values, + 1. true, err when the directory does not exist or corrupted. + 2. false, nil when the path is already mounted with a device. + 3. true, nil when the path is not mounted with any device. + */ + notMnt, err := d.mounter.IsLikelyNotMountPoint(target) + if err != nil && !os.IsNotExist(err) { + //Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount. + _, pathErr := d.mounter.PathExists(target) + if pathErr != nil && d.mounter.IsCorruptedMnt(pathErr) { + klog.V(4).Infof("NodePublishVolume: Target path %q is a corrupted mount. Trying to unmount.", target) + if mntErr := d.mounter.Unmount(target); mntErr != nil { + return !notMnt, status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, mntErr) + } + //After successful unmount, the device is ready to be mounted. + return !notMnt, nil + } + return !notMnt, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) + } + + if !notMnt { + klog.V(4).Infof("NodePublishVolume: Target path %q is already mounted", target) + return !notMnt, nil + } + + return !notMnt, err +} + func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeRequest, mountOptions []string, mode *csi.VolumeCapability_Mount) error { target := req.GetTargetPath() source := req.GetStagingTargetPath() @@ -624,13 +664,20 @@ func (d *nodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR } klog.V(4).Infof("NodePublishVolume: mounting %s at %s with option %s as fstype %s", source, target, mountOptions, fsType) - if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil { - if removeErr := os.Remove(target); removeErr != nil { - return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, err) - } + + //Checking if the target directory is already mounted with a device. 
+ mounted, err := d.isMounted(source, target) + + if err != nil { return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) } + if !mounted { + if err := d.mounter.Mount(source, target, fsType, mountOptions); err != nil { + return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) + } + } + return nil } diff --git a/pkg/driver/node_linux.go b/pkg/driver/node_linux.go index 4c4a9d0955..dff94d277c 100644 --- a/pkg/driver/node_linux.go +++ b/pkg/driver/node_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 20bf8c48dc..a1f01a4492 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -597,6 +597,109 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success filesystem mounted already", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success filesystem mountpoint error", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, nil), + ) + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("Internal system error")) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: devicePath}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + expectErr(t, err, codes.Internal) + }, + }, + { + name: "success filesystem corrupted mountpoint error", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := 
mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, errors.New("CorruptedMntError")) + mockMounter.EXPECT().IsCorruptedMnt(gomock.Eq(errors.New("CorruptedMntError"))).Return(true) + + mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("internal system error")) + mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind"})).Return(nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -629,6 +732,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(FSTypeXfs), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -670,6 +774,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "ro"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: devicePath}, @@ -703,6 +808,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(stagingTargetPath), gomock.Eq(targetPath), gomock.Eq(defaultFsType), gomock.Eq([]string{"bind", "test-flag"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, @@ -753,6 +859,145 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) mockMounter.EXPECT().MakeFile(targetPath).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success mounted already [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + 
awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil), + mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil), + ) + mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) + mockMounter.EXPECT().MakeFile(targetPath).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + if err != nil { + t.Fatalf("Expect no error but got: %v", err) + } + }, + }, + { + name: "success mountpoint error [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil), + mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil), + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, nil), + ) + + mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) + mockMounter.EXPECT().MakeFile(targetPath).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, errors.New("Internal System Error")) + + req := &csi.NodePublishVolumeRequest{ + PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, + StagingTargetPath: stagingTargetPath, + TargetPath: targetPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + VolumeId: "vol-test", + } + + _, err := awsDriver.NodePublishVolume(context.TODO(), req) + expectErr(t, err, codes.Internal) + }, + }, + { + name: "success corrupted mountpoint error [raw block]", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + gomock.InOrder( + mockMounter.EXPECT().PathExists(gomock.Eq(devicePath)).Return(true, nil), + mockMounter.EXPECT().PathExists(gomock.Eq("/test")).Return(false, nil), + mockMounter.EXPECT().PathExists(gomock.Eq(targetPath)).Return(true, errors.New("CorruptedMntError")), + ) + + mockMounter.EXPECT().IsCorruptedMnt(errors.New("CorruptedMntError")).Return(true) + + mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) + mockMounter.EXPECT().MakeFile(targetPath).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, 
errors.New("Internal System Error")) + mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Any(), gomock.Any()).Return(nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, @@ -797,6 +1042,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) mockMounter.EXPECT().MakeFile(targetPath).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(devicePathWithPartition), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, @@ -842,6 +1088,7 @@ func TestNodePublishVolume(t *testing.T) { mockMounter.EXPECT().MakeDir(gomock.Eq("/test")).Return(nil) mockMounter.EXPECT().MakeFile(targetPath).Return(nil) mockMounter.EXPECT().Mount(gomock.Eq(devicePath), gomock.Eq(targetPath), gomock.Eq(""), gomock.Eq([]string{"bind"})).Return(nil) + mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil) req := &csi.NodePublishVolumeRequest{ PublishContext: map[string]string{DevicePathKey: "/dev/fake"}, diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go index b8e8a3e2e3..fc28bdf4bc 100644 --- a/pkg/driver/sanity_test.go +++ b/pkg/driver/sanity_test.go @@ -293,6 +293,10 @@ func newFakeMounter() *fakeMounter { } } +func (f *fakeMounter) IsCorruptedMnt(err error) bool { + return false +} + func (f *fakeMounter) Mount(source string, target string, fstype string, options []string) error { return nil } diff --git a/pkg/mounter/safe_mounter_unix.go b/pkg/mounter/safe_mounter_unix.go index a72b544748..215047b001 100644 --- a/pkg/mounter/safe_mounter_unix.go +++ b/pkg/mounter/safe_mounter_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin /* diff --git a/pkg/mounter/safe_mounter_windows.go b/pkg/mounter/safe_mounter_windows.go index 35a3277d78..fd6b34f66b 100644 --- a/pkg/mounter/safe_mounter_windows.go +++ b/pkg/mounter/safe_mounter_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /*