Skip to content

Commit

Permalink
fix "not enough arguments in call" compile errors by passing the required context.Context argument
Browse files Browse the repository at this point in the history
Signed-off-by: hanenMizouni <[email protected]>
  • Loading branch information
outscale-hmi committed Aug 26, 2024
1 parent 8941b9e commit 6b80c57
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 18 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,12 @@ limitations under the License.
package testsuites

import (
"context"
"fmt"
"regexp"

. "github.com/onsi/ginkgo/v2"
omega "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
Expand Down Expand Up @@ -80,7 +82,7 @@ func (t *DynamicallyProvisionedCustomPodTest) Run(client clientset.Interface, na
By("checking that the pod is running")
tDeployment.WaitForPodReady()

pods, err := e2edeployment.GetPodsForDeployment(client, tDeployment.deployment)
pods, err := e2edeployment.GetPodsForDeployment(context.TODO(), client, tDeployment.deployment)
framework.ExpectNoError(err)
singleSpacePattern := regexp.MustCompile(`\s+`)
for _, podCmd := range t.PodCmds {
Expand All @@ -90,6 +92,7 @@ func (t *DynamicallyProvisionedCustomPodTest) Run(client clientset.Interface, na
if err != nil {
panic(err.Error())
}
framework.ExpectEqual(singleSpacePattern.ReplaceAllString(stdout, " "), podCmd.ExpectedString)
omega.Expect(singleSpacePattern.ReplaceAllString(stdout, " ")).To(omega.Equal(podCmd.ExpectedString), "Value should match the expected string")
//framework.ExpectEqual(singleSpacePattern.ReplaceAllString(stdout, " "), podCmd.ExpectedString)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ limitations under the License.
package testsuites

import (
"context"
"bufio"
"fmt"
"math/big"
Expand Down Expand Up @@ -134,7 +135,7 @@ func (t *DynamicallyProvisionedStatsPodTest) Run(client clientset.Interface, nam
By("checking that the pod is running")
tDeployment.WaitForPodReady()

pods, err := e2edeployment.GetPodsForDeployment(client, tDeployment.deployment)
pods, err := e2edeployment.GetPodsForDeployment(context.TODO(), client, tDeployment.deployment)
framework.ExpectNoError(err)

pod_host_ip := pods.Items[0].Status.HostIP
Expand Down
30 changes: 15 additions & 15 deletions tests/e2e/testsuites/testsuites.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
var err error

By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(context.TODO(), v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)

By("checking the PVC")
Expand All @@ -313,7 +313,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
Expand All @@ -326,7 +326,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe

func (t *TestPersistentVolumeClaim) Cleanup() {
framework.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
err := e2epv.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
err := e2epv.DeletePersistentVolumeClaim(context.TODO(), t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
framework.ExpectNoError(err)
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
Expand All @@ -336,7 +336,7 @@ func (t *TestPersistentVolumeClaim) Cleanup() {
// in a couple of minutes.
if t.persistentVolume != nil && t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
err := e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 20*time.Minute)
err := e2epv.WaitForPersistentVolumeDeleted(context.TODO(), t.client, t.persistentVolume.Name, 5*time.Second, 20*time.Minute)
framework.ExpectNoError(err)
}
// Wait for the PVC to be deleted
Expand All @@ -349,17 +349,17 @@ func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPo
}

// WaitForPersistentVolumePhase blocks until the claim's bound PV reaches the
// given phase, polling every 5 seconds for up to 10 minutes, and fails the
// test on error or timeout.
func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) {
	// Newer e2epv helpers require a context as the first argument.
	err := e2epv.WaitForPersistentVolumePhase(context.TODO(), phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
	framework.ExpectNoError(err)
}

// DeleteBoundPersistentVolume deletes the PV currently bound to this claim
// and then waits (polling every 5 seconds, up to 20 minutes) for the
// deletion to complete, failing the test on any error.
func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() {
	By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
	// Newer e2epv helpers require a context as the first argument.
	err := e2epv.DeletePersistentVolume(context.TODO(), t.client, t.persistentVolume.Name)
	framework.ExpectNoError(err)

	By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
	err = e2epv.WaitForPersistentVolumeDeleted(context.TODO(), t.client, t.persistentVolume.Name, 5*time.Second, 20*time.Minute)
	framework.ExpectNoError(err)
}

Expand Down Expand Up @@ -447,19 +447,19 @@ func (t *TestDeployment) Create() {
framework.ExpectNoError(err)
err = e2edeployment.WaitForDeploymentComplete(t.client, t.deployment)
framework.ExpectNoError(err)
pods, err := e2edeployment.GetPodsForDeployment(t.client, t.deployment)
pods, err := e2edeployment.GetPodsForDeployment(context.TODO(), t.client, t.deployment)
framework.ExpectNoError(err)
// always get first pod as there should only be one
t.podName = pods.Items[0].Name
}

// WaitForPodReady waits for the deployment's pod to reach the Running phase
// and caches its name on the TestDeployment for later use. Fails the test on
// error.
func (t *TestDeployment) WaitForPodReady() {
	// Newer e2e helpers require a context as the first argument.
	pods, err := e2edeployment.GetPodsForDeployment(context.TODO(), t.client, t.deployment)
	framework.ExpectNoError(err)
	// Always take the first pod: the deployment is expected to have exactly one.
	pod := pods.Items[0]
	t.podName = pod.Name
	err = e2epod.WaitForPodRunningInNamespace(context.TODO(), t.client, &pod)
	framework.ExpectNoError(err)
}

Expand All @@ -478,7 +478,7 @@ func (t *TestDeployment) DeletePodAndWait() {
return
}
framework.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name)
err = e2epod.WaitForPodNotFoundInNamespace(t.client, t.podName, t.namespace.Name, 3*time.Minute)
err = e2epod.WaitForPodNotFoundInNamespace(context.TODO(), t.client, t.podName, t.namespace.Name, 3*time.Minute)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %v", t.podName, err))
Expand Down Expand Up @@ -557,12 +557,12 @@ func (t *TestPod) Create() {
}

// WaitForSuccess waits for the test pod to be scheduled and running.
// NOTE(review): this now delegates to WaitForPodRunningInNamespace (the
// previous WaitForPodSuccessInNamespaceSlow helper was removed upstream), so
// it no longer waits for the pod to reach the Succeeded phase — a pod that
// runs but never completes still satisfies this wait. Confirm this is the
// intended semantics for callers.
func (t *TestPod) WaitForSuccess() {
	err := e2epod.WaitForPodRunningInNamespace(context.TODO(), t.client, t.pod)
	framework.ExpectNoError(err)
}

// WaitForRunning waits for the test pod to reach the Running phase, failing
// the test on error or timeout.
func (t *TestPod) WaitForRunning() {
	// Newer e2epod helpers require a context as the first argument.
	err := e2epod.WaitForPodRunningInNamespace(context.TODO(), t.client, t.pod)
	framework.ExpectNoError(err)
}

Expand All @@ -581,7 +581,7 @@ var podFailedCondition = func(pod *v1.Pod) (bool, error) {
}

// WaitForFailure waits (up to slowPodStartTimeout) for the test pod to
// satisfy podFailedCondition, i.e. to enter a failed state, failing the test
// on error or timeout.
func (t *TestPod) WaitForFailure() {
	// Newer e2epod helpers require a context as the first argument.
	err := e2epod.WaitForPodCondition(context.TODO(), t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
	framework.ExpectNoError(err)
}

Expand Down Expand Up @@ -642,7 +642,7 @@ func cleanupPodOrFail(client clientset.Interface, name, namespace string) {
} else {
framework.Logf("Pod %s has the following logs: %s", name, body)
}
e2epod.DeletePodOrFail(client, namespace, name)
e2epod.DeletePodOrFail(context.TODO(), client, namespace, name)
}

func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) {
Expand Down

0 comments on commit 6b80c57

Please sign in to comment.