From 7262bdbed1fedb46aa5bde421c2689002441b6e0 Mon Sep 17 00:00:00 2001 From: Eugen Sumin Date: Fri, 4 Oct 2024 11:28:03 +0200 Subject: [PATCH] Migrate pkg/function to errkit --- pkg/function/args.go | 10 ++-- pkg/function/backup_data.go | 13 ++--- pkg/function/backup_data_all.go | 14 +++--- pkg/function/backup_data_stats.go | 14 +++--- .../backup_data_using_kopia_server.go | 30 +++++------ pkg/function/checkRepository.go | 8 +-- pkg/function/copy_volume_data.go | 16 +++--- pkg/function/create_csi_snapshot.go | 6 +-- pkg/function/create_rds_snapshot.go | 22 ++++---- pkg/function/create_volume_from_snapshot.go | 20 ++++---- pkg/function/create_volume_snapshot.go | 50 +++++++++---------- pkg/function/delete_data.go | 18 +++---- pkg/function/delete_data_all.go | 6 +-- .../delete_data_using_kopia_server.go | 18 +++---- pkg/function/delete_rds_snapshot.go | 16 +++--- pkg/function/delete_volume_snapshot.go | 12 ++--- pkg/function/export_rds_snapshot_location.go | 34 ++++++------- pkg/function/kube_exec_all.go | 4 +- pkg/function/kube_task.go | 10 ++-- pkg/function/kubeops.go | 8 +-- pkg/function/location_delete.go | 4 +- pkg/function/prepare_data.go | 18 +++---- pkg/function/restore_csi_snapshot.go | 6 +-- pkg/function/restore_data.go | 19 +++---- pkg/function/restore_data_all.go | 14 +++--- .../restore_data_using_kopia_server.go | 24 ++++----- pkg/function/restore_rds_snapshot.go | 40 +++++++-------- pkg/function/scale_workload.go | 17 ++++--- pkg/function/utils.go | 49 +++++++++--------- pkg/function/wait.go | 12 ++--- pkg/function/wait_for_snapshot_completion.go | 12 ++--- pkg/function/waitv2.go | 4 +- 32 files changed, 276 insertions(+), 272 deletions(-) diff --git a/pkg/function/args.go b/pkg/function/args.go index 48be8626b0..1b87fb3b86 100644 --- a/pkg/function/args.go +++ b/pkg/function/args.go @@ -15,8 +15,10 @@ package function import ( + "fmt" + + "github.com/kanisterio/errkit" "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" "sigs.k8s.io/yaml" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -29,11 +31,11 @@ import ( func Arg(args map[string]interface{}, argName string, result interface{}) error { if val, ok := args[argName]; ok { if err := mapstructure.WeakDecode(val, result); err != nil { - return errors.Wrapf(err, "Failed to decode arg `%s`", argName) + return errkit.Wrap(err, fmt.Sprintf("Failed to decode arg `%s`", argName)) } return nil } - return errors.New("Argument missing " + argName) + return errkit.New("Argument missing " + argName) } // OptArg returns the value of the specified argument if it exists @@ -105,5 +107,5 @@ func GetYamlList(args map[string]interface{}, argName string) ([]string, error) err := yaml.Unmarshal(valListBytes, &valList) return valList, err } - return nil, errors.Errorf("Invalid %s arg format", argName) + return nil, errkit.New(fmt.Sprintf("Invalid %s arg format", argName)) } diff --git a/pkg/function/backup_data.go b/pkg/function/backup_data.go index 3a9d901741..aafbe45713 100644 --- a/pkg/function/backup_data.go +++ b/pkg/function/backup_data.go @@ -16,9 +16,10 @@ package function import ( "context" + "fmt" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" @@ -110,20 +111,20 @@ func (b *backupDataFunc) Exec(ctx context.Context, tp param.TemplateParams, args } if err = ValidateProfile(tp.Profile); err != nil { - return nil, errors.Wrapf(err, "Failed to validate Profile") + return nil, 
errkit.Wrap(err, "Failed to validate Profile") } backupArtifactPrefix = ResolveArtifactPrefix(backupArtifactPrefix, tp.Profile) cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } ctx = field.Context(ctx, consts.PodNameKey, pod) ctx = field.Context(ctx, consts.ContainerNameKey, container) backupOutputs, err := backupData(ctx, cli, namespace, pod, container, backupArtifactPrefix, includePath, encryptionKey, insecureTLS, tp) if err != nil { - return nil, errors.Wrapf(err, "Failed to backup data") + return nil, errkit.Wrap(err, "Failed to backup data") } output := map[string]interface{}{ BackupDataOutputBackupID: backupOutputs.backupID, @@ -194,12 +195,12 @@ func backupData(ctx context.Context, cli kubernetes.Interface, namespace, pod, c format.LogWithCtx(ctx, pod, container, stdout) format.LogWithCtx(ctx, pod, container, stderr) if err != nil { - return backupDataParsedOutput{}, errors.Wrapf(err, "Failed to create and upload backup") + return backupDataParsedOutput{}, errkit.Wrap(err, "Failed to create and upload backup") } // Get the snapshot ID from log backupID := restic.SnapshotIDFromBackupLog(stdout) if backupID == "" { - return backupDataParsedOutput{}, errors.Errorf("Failed to parse the backup ID from logs, backup logs %s", stdout) + return backupDataParsedOutput{}, errkit.New(fmt.Sprintf("Failed to parse the backup ID from logs, backup logs %s", stdout)) } // Get the file count and size of the backup from log fileCount, backupSize, phySize := restic.SnapshotStatsFromBackupLog(stdout) diff --git a/pkg/function/backup_data_all.go b/pkg/function/backup_data_all.go index dfbb288e95..0b799a9aa5 100644 --- a/pkg/function/backup_data_all.go +++ b/pkg/function/backup_data_all.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -106,14 +106,14 @@ func (b *backupDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, a } if err = ValidateProfile(tp.Profile); err != nil { - return nil, errors.Wrapf(err, "Failed to validate Profile") + return nil, errkit.Wrap(err, "Failed to validate Profile") } backupArtifactPrefix = ResolveArtifactPrefix(backupArtifactPrefix, tp.Profile) cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } var ps []string if pods == "" { @@ -123,7 +123,7 @@ func (b *backupDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, a case tp.StatefulSet != nil: ps = tp.StatefulSet.Pods default: - return nil, errors.New("Failed to get pods") + return nil, errkit.New("Failed to get pods") } } else { ps = strings.Fields(pods) @@ -171,7 +171,7 @@ func backupDataAll(ctx context.Context, cli kubernetes.Interface, namespace stri go func(pod string, container string) { ctx = field.Context(ctx, consts.PodNameKey, pod) backupOutputs, err := backupData(ctx, cli, namespace, pod, container, fmt.Sprintf("%s/%s", backupArtifactPrefix, pod), includePath, encryptionKey, insecureTLS, tp) - errChan <- errors.Wrapf(err, "Failed to backup data for pod %s", pod) + errChan <- errkit.Wrap(err, "Failed to backup data for pod", "pod", pod) outChan <- BackupInfo{PodName: pod, BackupID: backupOutputs.backupID, BackupTag: backupOutputs.backupTag} }(pod, container) } @@ -186,11 +186,11 @@ func 
backupDataAll(ctx context.Context, cli kubernetes.Interface, namespace stri } } if len(errs) != 0 { - return nil, errors.New(strings.Join(errs, "\n")) + return nil, errkit.New(strings.Join(errs, "\n")) } manifestData, err := json.Marshal(Output) if err != nil { - return nil, errors.Wrapf(err, "Failed to encode JSON data") + return nil, errkit.Wrap(err, "Failed to encode JSON data") } return map[string]interface{}{ BackupDataAllOutput: string(manifestData), diff --git a/pkg/function/backup_data_stats.go b/pkg/function/backup_data_stats.go index 1fdb5b2042..9d36a72991 100644 --- a/pkg/function/backup_data_stats.go +++ b/pkg/function/backup_data_stats.go @@ -19,7 +19,7 @@ import ( "context" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -113,7 +113,7 @@ func backupDataStatsPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } remover, err := MaybeWriteProfileCredentials(ctx, pc, tp.Profile) @@ -131,7 +131,7 @@ func backupDataStatsPodFunc( commandExecutor, err := pc.GetCommandExecutor() if err != nil { - return nil, errors.Wrap(err, "Unable to get pod command executor") + return nil, errkit.Wrap(err, "Unable to get pod command executor") } var stdout, stderr bytes.Buffer @@ -139,12 +139,12 @@ func backupDataStatsPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to get backup stats") + return nil, errkit.Wrap(err, "Failed to get backup stats") } // Get File Count and Size from Stats mode, fc, size := restic.SnapshotStatsFromStatsLog(stdout.String()) if fc == "" || size == "" { - return nil, errors.New("Failed to parse snapshot stats from logs") + return nil, errkit.New("Failed to parse snapshot stats from logs") } return map[string]interface{}{ BackupDataStatsOutputMode: mode, @@ -206,14 +206,14 @@ func (b *BackupDataStatsFunc) Exec(ctx context.Context, tp param.TemplateParams, } if err = ValidateProfile(tp.Profile); err != nil { - return nil, errors.Wrapf(err, "Failed to validate Profile") + return nil, errkit.Wrap(err, "Failed to validate Profile") } backupArtifactPrefix = ResolveArtifactPrefix(backupArtifactPrefix, tp.Profile) cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return backupDataStats( ctx, diff --git a/pkg/function/backup_data_using_kopia_server.go b/pkg/function/backup_data_using_kopia_server.go index 2e25b75aa3..3d0e8158c5 100644 --- a/pkg/function/backup_data_using_kopia_server.go +++ b/pkg/function/backup_data_using_kopia_server.go @@ -22,7 +22,7 @@ import ( "time" "github.com/dustin/go-humanize" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -131,22 +131,22 @@ func (b *backupDataUsingKopiaServerFunc) Exec(ctx context.Context, tp param.Temp userPassphrase, cert, err := userCredentialsAndServerTLS(&tp) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch User Credentials/Certificate Data from Template Params") + return nil, errkit.Wrap(err, "Failed to 
fetch User Credentials/Certificate Data from Template Params") } fingerprint, err := kankopia.ExtractFingerprintFromCertificateJSON(cert) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") + return nil, errkit.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") } hostname, userAccessPassphrase, err := hostNameAndUserPassPhraseFromRepoServer(userPassphrase, userHostname) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch Hostname/User Passphrase from Secret") + return nil, errkit.Wrap(err, "Failed to fetch Hostname/User Passphrase from Secret") } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrap(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } snapInfo, err := backupDataUsingKopiaServer( @@ -164,7 +164,7 @@ func (b *backupDataUsingKopiaServerFunc) Exec(ctx context.Context, tp param.Temp tags, ) if err != nil { - return nil, errors.Wrap(err, "Failed to backup data using Kopia Repository Server") + return nil, errkit.Wrap(err, "Failed to backup data using Kopia Repository Server") } var logSize, phySize int64 @@ -226,7 +226,7 @@ func backupDataUsingKopiaServer( format.Log(pod, container, stdout) format.Log(pod, container, stderr) if err != nil { - return nil, errors.Wrap(err, "Failed to connect to Kopia Repository Server") + return nil, errkit.Wrap(err, "Failed to connect to Kopia Repository Server") } cmd = kopiacmd.SnapshotCreate( @@ -242,7 +242,7 @@ func backupDataUsingKopiaServer( Parallelism: utils.GetEnvAsIntOrDefault(kankopia.DataStoreParallelUploadName, kankopia.DefaultDataStoreParallelUpload), }) if err != nil { - return nil, errors.Wrap(err, "Failed to construct snapshot create command") + return nil, errkit.Wrap(err, "Failed to construct snapshot create command") } stdout, stderr, err = kube.Exec(ctx, cli, namespace, pod, container, cmd, nil) format.Log(pod, container, stdout) @@ -253,7 +253,7 @@ func backupDataUsingKopiaServer( if strings.Contains(err.Error(), kerrors.ErrCodeOutOfMemoryStr) { message = message + ": " + kerrors.ErrOutOfMemoryStr } - return nil, errors.Wrap(err, message) + return nil, errkit.Wrap(err, message) } // Parse logs and return snapshot IDs and stats return kopiacmd.ParseSnapshotCreateOutput(stdout, stderr) @@ -262,14 +262,14 @@ func backupDataUsingKopiaServer( func hostNameAndUserPassPhraseFromRepoServer(userCreds, hostname string) (string, string, error) { var userAccessMap map[string]string if err := json.Unmarshal([]byte(userCreds), &userAccessMap); err != nil { - return "", "", errors.Wrap(err, "Failed to unmarshal User Credentials Data") + return "", "", errkit.Wrap(err, "Failed to unmarshal User Credentials Data") } // Check if hostname provided exists in the User Access Map if hostname != "" { err := checkHostnameExistsInUserAccessMap(userAccessMap, hostname) if err != nil { - return "", "", errors.Wrap(err, "Failed to find hostname in the User Access Map") + return "", "", errkit.Wrap(err, "Failed to find hostname in the User Access Map") } } @@ -286,7 +286,7 @@ func hostNameAndUserPassPhraseFromRepoServer(userCreds, hostname string) (string decodedUserPassphrase, err := base64.StdEncoding.DecodeString(userPassphrase) if err != nil { - return "", "", errors.Wrap(err, "Failed to Decode User Passphrase") + return "", "", errkit.Wrap(err, "Failed to Decode User Passphrase") } return hostname, string(decodedUserPassphrase), nil } @@ -294,11 +294,11 
@@ func hostNameAndUserPassPhraseFromRepoServer(userCreds, hostname string) (string func userCredentialsAndServerTLS(tp *param.TemplateParams) (string, string, error) { userCredJSON, err := json.Marshal(tp.RepositoryServer.Credentials.ServerUserAccess.Data) if err != nil { - return "", "", errors.Wrap(err, "Error marshalling User Credentials Data") + return "", "", errkit.Wrap(err, "Error marshalling User Credentials Data") } certJSON, err := json.Marshal(tp.RepositoryServer.Credentials.ServerTLS.Data) if err != nil { - return "", "", errors.Wrap(err, "Error marshalling Certificate Data") + return "", "", errkit.Wrap(err, "Error marshalling Certificate Data") } return string(userCredJSON), string(certJSON), nil } @@ -306,7 +306,7 @@ func userCredentialsAndServerTLS(tp *param.TemplateParams) (string, string, erro func checkHostnameExistsInUserAccessMap(userAccessMap map[string]string, hostname string) error { // check if hostname that is provided by the user exists in the user access map if _, ok := userAccessMap[hostname]; !ok { - return errors.New("hostname provided in the repository server CR does not exist in the user access map") + return errkit.New("hostname provided in the repository server CR does not exist in the user access map") } return nil } diff --git a/pkg/function/checkRepository.go b/pkg/function/checkRepository.go index ec5e3af37c..8e19c1a46c 100644 --- a/pkg/function/checkRepository.go +++ b/pkg/function/checkRepository.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -62,7 +62,7 @@ func CheckRepository( ) (map[string]interface{}, error) { namespace, err := kube.GetControllerNamespace() if err != nil { - return nil, errors.Wrapf(err, "Failed to get controller namespace") + return nil, errkit.Wrap(err, "Failed to get controller namespace") } options := &kube.PodOptions{ Namespace: namespace, @@ -94,7 +94,7 @@ func CheckRepositoryPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } remover, err := MaybeWriteProfileCredentials(ctx, pc, tp.Profile) @@ -193,7 +193,7 @@ func (c *CheckRepositoryFunc) Exec(ctx context.Context, tp param.TemplateParams, cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return CheckRepository( ctx, diff --git a/pkg/function/copy_volume_data.go b/pkg/function/copy_volume_data.go index f3b828dde6..57a97c8113 100644 --- a/pkg/function/copy_volume_data.go +++ b/pkg/function/copy_volume_data.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" @@ -87,7 +87,7 @@ func copyVolumeData( // Validate PVC exists pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to retrieve PVC. 
Namespace %s, Name %s", namespace, pvcName) + return nil, errkit.Wrap(err, "Failed to retrieve PVC.", "namespace", namespace, "name", pvcName) } // Create a pod with PVCs attached @@ -125,12 +125,12 @@ func copyVolumeDataPodFunc( return func(ctx context.Context, pc kube.PodController) (map[string]interface{}, error) { // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pc.PodName()) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pc.PodName()) } remover, err := MaybeWriteProfileCredentials(ctx, pc, tp.Profile) if err != nil { - return nil, errors.Wrapf(err, "Failed to write credentials to Pod %s", pc.PodName()) + return nil, errkit.Wrap(err, "Failed to write credentials to Pod", "pod", pc.PodName()) } // Parent context could already be dead, so removing file within new context @@ -166,12 +166,12 @@ func copyVolumeDataPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to create and upload backup") + return nil, errkit.Wrap(err, "Failed to create and upload backup") } // Get the snapshot ID from log backupID := restic.SnapshotIDFromBackupLog(stdout.String()) if backupID == "" { - return nil, errors.Errorf("Failed to parse the backup ID from logs, backup logs %s", stdout.String()) + return nil, errkit.New(fmt.Sprintf("Failed to parse the backup ID from logs, backup logs %s", stdout.String())) } fileCount, backupSize, phySize := restic.SnapshotStatsFromBackupLog(stdout.String()) if backupSize == "" { @@ -241,14 +241,14 @@ func (c *copyVolumeDataFunc) Exec(ctx context.Context, tp param.TemplateParams, } if err = ValidateProfile(tp.Profile); err != nil { - return nil, errors.Wrapf(err, "Failed to validate Profile") + return nil, errkit.Wrap(err, "Failed to validate Profile") } targetPath = ResolveArtifactPrefix(targetPath, tp.Profile) cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return copyVolumeData( ctx, diff --git a/pkg/function/create_csi_snapshot.go b/pkg/function/create_csi_snapshot.go index 1733b912a8..b660c914a3 100644 --- a/pkg/function/create_csi_snapshot.go +++ b/pkg/function/create_csi_snapshot.go @@ -19,8 +19,8 @@ import ( "fmt" "time" + "github.com/kanisterio/errkit" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" @@ -102,9 +102,9 @@ func (c *createCSISnapshotFunc) Exec(ctx context.Context, tp param.TemplateParam } snapshotter, err := snapshot.NewSnapshotter(kubeCli, dynCli) if err != nil { - if errors.Is(context.DeadlineExceeded, err) { + if errkit.Is(context.DeadlineExceeded, err) { timeoutMsg := "SnapshotContent not provisioned within given timeout. 
Please check if CSI driver is installed correctly and supports VolumeSnapshot feature" - return nil, errors.Wrap(err, timeoutMsg) + return nil, errkit.Wrap(err, timeoutMsg) } return nil, err } diff --git a/pkg/function/create_rds_snapshot.go b/pkg/function/create_rds_snapshot.go index e67caf429a..5de1172ac2 100644 --- a/pkg/function/create_rds_snapshot.go +++ b/pkg/function/create_rds_snapshot.go @@ -20,7 +20,7 @@ import ( "strconv" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/yaml" @@ -78,19 +78,19 @@ func createRDSSnapshot(ctx context.Context, instanceID string, dbEngine RDSDBEng var allocatedStorage int64 // Validate profile if err := ValidateProfile(profile); err != nil { - return nil, errors.Wrap(err, "Profile Validation failed") + return nil, errkit.Wrap(err, "Profile Validation failed") } // Get aws config from profile awsConfig, region, err := getAWSConfigFromProfile(ctx, profile) if err != nil { - return nil, errors.Wrap(err, "Failed to get AWS creds from profile") + return nil, errkit.Wrap(err, "Failed to get AWS creds from profile") } // Create rds client rdsCli, err := rds.NewClient(ctx, awsConfig, region) if err != nil { - return nil, errors.Wrap(err, "Failed to create RDS client") + return nil, errkit.Wrap(err, "Failed to create RDS client") } // Create Snapshot @@ -110,13 +110,13 @@ func createRDSSnapshot(ctx context.Context, instanceID string, dbEngine RDSDBEng sgIDs, e = findAuroraSecurityGroups(ctx, rdsCli, instanceID) } if e != nil { - return nil, errors.Wrapf(e, "Failed to fetch security group ids. InstanceID=%s", instanceID) + return nil, errkit.Wrap(e, "Failed to fetch security group ids. InstanceID=", "instanceID=", instanceID) } // Convert to yaml format sgIDYaml, err := yaml.Marshal(sgIDs) if err != nil { - return nil, errors.Wrapf(err, "Failed to create securityGroupID artifact. InstanceID=%s", instanceID) + return nil, errkit.Wrap(err, "Failed to create securityGroupID artifact. InstanceID=", "instanceID=", instanceID) } var dbSubnetGroup *string @@ -127,7 +127,7 @@ func createRDSSnapshot(ctx context.Context, instanceID string, dbEngine RDSDBEng dbSubnetGroup, e = GetRDSDBSubnetGroup(ctx, rdsCli, instanceID) } if e != nil { - return nil, errors.Wrapf(e, "Failed to get dbSubnetGroup ids. InstanceID=%s", instanceID) + return nil, errkit.Wrap(e, "Failed to get dbSubnetGroup ids. 
InstanceID=", "instanceID=", instanceID) } output := map[string]interface{}{ @@ -146,13 +146,13 @@ func createSnapshot(ctx context.Context, rdsCli *rds.RDS, snapshotID, instanceID if !isAuroraCluster(dbEngine) { dbSnapshotOutput, err := rdsCli.CreateDBSnapshot(ctx, instanceID, snapshotID) if err != nil { - return allocatedStorage, errors.Wrap(err, "Failed to create snapshot") + return allocatedStorage, errkit.Wrap(err, "Failed to create snapshot") } // Wait until snapshot becomes available log.WithContext(ctx).Print("Waiting for RDS snapshot to be available", field.M{"SnapshotID": snapshotID}) if err := rdsCli.WaitUntilDBSnapshotAvailable(ctx, snapshotID); err != nil { - return allocatedStorage, errors.Wrap(err, "Error while waiting snapshot to be available") + return allocatedStorage, errkit.Wrap(err, "Error while waiting snapshot to be available") } if dbSnapshotOutput.DBSnapshot != nil && dbSnapshotOutput.DBSnapshot.AllocatedStorage != nil { allocatedStorage = *(dbSnapshotOutput.DBSnapshot.AllocatedStorage) @@ -160,12 +160,12 @@ func createSnapshot(ctx context.Context, rdsCli *rds.RDS, snapshotID, instanceID return allocatedStorage, nil } if _, err := rdsCli.CreateDBClusterSnapshot(ctx, instanceID, snapshotID); err != nil { - return allocatedStorage, errors.Wrap(err, "Failed to create cluster snapshot") + return allocatedStorage, errkit.Wrap(err, "Failed to create cluster snapshot") } log.WithContext(ctx).Print("Waiting for RDS Aurora snapshot to be available", field.M{"SnapshotID": snapshotID}) if err := rdsCli.WaitUntilDBClusterSnapshotAvailable(ctx, snapshotID); err != nil { - return allocatedStorage, errors.Wrap(err, "Error while waiting snapshot to be available") + return allocatedStorage, errkit.Wrap(err, "Error while waiting snapshot to be available") } return allocatedStorage, nil } diff --git a/pkg/function/create_volume_from_snapshot.go b/pkg/function/create_volume_from_snapshot.go index 95ed5a08a4..267851104f 100644 --- a/pkg/function/create_volume_from_snapshot.go +++ b/pkg/function/create_volume_from_snapshot.go @@ -19,7 +19,7 @@ import ( "encoding/json" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -66,10 +66,10 @@ func createVolumeFromSnapshot(ctx context.Context, cli kubernetes.Interface, nam PVCData := []VolumeSnapshotInfo{} err := json.Unmarshal([]byte(snapshotinfo), &PVCData) if err != nil { - return nil, errors.Wrapf(err, "Could not decode JSON data") + return nil, errkit.Wrap(err, "Could not decode JSON data") } if len(pvcNames) > 0 && len(pvcNames) != len(PVCData) { - return nil, errors.New("Invalid number of PVC names provided") + return nil, errkit.New("Invalid number of PVC names provided") } // providerList required for unit testing providerList := make(map[string]blockstorage.Provider) @@ -79,7 +79,7 @@ func createVolumeFromSnapshot(ctx context.Context, cli kubernetes.Interface, nam pvcName = pvcNames[i] } if err = ValidateLocationForBlockstorage(profile, pvcInfo.Type); err != nil { - return nil, errors.Wrap(err, "Profile validation failed") + return nil, errkit.Wrap(err, "Profile validation failed") } config := getConfig(profile, pvcInfo.Type) if pvcInfo.Type == blockstorage.TypeEBS { @@ -88,7 +88,7 @@ func createVolumeFromSnapshot(ctx context.Context, cli kubernetes.Interface, nam provider, err := getter.Get(pvcInfo.Type, config) if err != nil { - return nil, errors.Wrapf(err, "Could not get storage provider %v", pvcInfo.Type) + return nil, errkit.Wrap(err, 
"Could not get storage provider", "provider", pvcInfo.Type) } _, err = cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err == nil { @@ -98,7 +98,7 @@ func createVolumeFromSnapshot(ctx context.Context, cli kubernetes.Interface, nam } snapshot, err := provider.SnapshotGet(ctx, pvcInfo.SnapshotID) if err != nil { - return nil, errors.Wrapf(err, "Failed to get Snapshot from Provider") + return nil, errkit.Wrap(err, "Failed to get Snapshot from Provider") } tags := map[string]string{ @@ -109,19 +109,19 @@ func createVolumeFromSnapshot(ctx context.Context, cli kubernetes.Interface, nam snapshot.Volume.Tags = pvcInfo.Tags vol, err := provider.VolumeCreateFromSnapshot(ctx, *snapshot, tags) if err != nil { - return nil, errors.Wrapf(err, "Failed to create volume from snapshot, snapID: %s", snapshot.ID) + return nil, errkit.Wrap(err, "Failed to create volume from snapshot", "snapID", snapshot.ID) } annotations := map[string]string{} pvc, err := kubevolume.CreatePVC(ctx, cli, namespace, pvcName, vol.SizeInBytes, vol.ID, annotations, nil, nil) if err != nil { - return nil, errors.Wrapf(err, "Unable to create PVC for volume %v", *vol) + return nil, errkit.Wrap(err, "Unable to create PVC for volume", "volume", *vol) } pvAnnotations := addPVProvisionedByAnnotation(nil, provider) pv, err := kubevolume.CreatePV(ctx, cli, vol, vol.Type, pvAnnotations, nil, nil) if err != nil { - return nil, errors.Wrapf(err, "Unable to create PV for volume %v", *vol) + return nil, errkit.Wrap(err, "Unable to create PV for volume", "volume", *vol) } log.WithContext(ctx).Print("Restore/Create volume from snapshot completed", field.M{"PVC": pvc, "Volume": pv}) providerList[pvcInfo.PVCName] = provider @@ -152,7 +152,7 @@ func (c *createVolumeFromSnapshotFunc) Exec(ctx context.Context, tp param.Templa cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } var namespace, snapshotinfo string var pvcNames []string diff --git a/pkg/function/create_volume_snapshot.go b/pkg/function/create_volume_snapshot.go index 5bcc17f65a..4a27e160f3 100644 --- a/pkg/function/create_volume_snapshot.go +++ b/pkg/function/create_volume_snapshot.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -87,22 +87,22 @@ type volumeInfo struct { func ValidateLocationForBlockstorage(profile *param.Profile, sType blockstorage.Type) error { if err := ValidateProfile(profile); err != nil { - return errors.Wrapf(err, "Profile Validation failed") + return errkit.Wrap(err, "Profile Validation failed") } switch sType { case blockstorage.TypeEBS: if profile.Location.Type != crv1alpha1.LocationTypeS3Compliant { - return errors.Errorf("Location type %s not supported for blockstorage type %s", profile.Location.Type, sType) + return errkit.New(fmt.Sprintf("Location type %s not supported for blockstorage type %s", profile.Location.Type, sType)) } if len(profile.Location.Region) == 0 { - return errors.Errorf("Region is not set. Required for blockstorage type %s", sType) + return errkit.New(fmt.Sprintf("Region is not set. 
Required for blockstorage type %s", sType)) } case blockstorage.TypeGPD: if profile.Location.Type != crv1alpha1.LocationTypeGCS { - return errors.Errorf("Location type %s not supported for blockstorage type %s", profile.Location.Type, sType) + return errkit.New(fmt.Sprintf("Location type %s not supported for blockstorage type %s", profile.Location.Type, sType)) } default: - return errors.Errorf("Storage provider not supported %s", sType) + return errkit.New(fmt.Sprintf("Storage provider not supported %s", sType)) } return nil } @@ -112,7 +112,7 @@ func createVolumeSnapshot(ctx context.Context, tp param.TemplateParams, cli kube for _, pvc := range pvcs { volInfo, err := getPVCInfo(ctx, cli, namespace, pvc, tp, getter) if err != nil { - return nil, errors.Wrapf(err, "Failed to get PVC info") + return nil, errkit.Wrap(err, "Failed to get PVC info") } vols = append(vols, *volInfo) } @@ -136,12 +136,12 @@ func createVolumeSnapshot(ctx context.Context, tp param.TemplateParams, cli kube err := fmt.Errorf(strings.Join(errstrings, "\n")) if len(err.Error()) > 0 { - return nil, errors.Wrapf(err, "Failed to snapshot one of the volumes") + return nil, errkit.Wrap(err, "Failed to snapshot one of the volumes") } manifestData, err := json.Marshal(PVCData) if err != nil { - return nil, errors.Wrapf(err, "Failed to encode JSON data") + return nil, errkit.Wrap(err, "Failed to encode JSON data") } return map[string]interface{}{"volumeSnapshotInfo": string(manifestData)}, nil @@ -151,10 +151,10 @@ func snapshotVolume(ctx context.Context, volume volumeInfo, skipWait bool) (*Vol provider := volume.provider vol, err := provider.VolumeGet(ctx, volume.volumeID, volume.volZone) if err != nil { - return nil, errors.Wrapf(err, "Volume unavailable, volumeID: %s", volume.volumeID) + return nil, errkit.Wrap(err, "Volume unavailable, volumeID:", "volumeID:", volume.volumeID) } if vol.Encrypted { - return nil, errors.New("Encrypted volumes are unsupported") + return nil, errkit.New("Encrypted volumes are unsupported") } // Snapshot the volume. 
@@ -170,7 +170,7 @@ func snapshotVolume(ctx context.Context, volume volumeInfo, skipWait bool) (*Vol } if !skipWait { if err := provider.SnapshotCreateWaitForCompletion(ctx, snap); err != nil { - return nil, errors.Wrap(err, "Snapshot creation did not complete") + return nil, errkit.Wrap(err, "Snapshot creation did not complete") } } return &VolumeSnapshotInfo{SnapshotID: snap.ID, Type: volume.sType, Region: volume.region, PVCName: volume.pvc, Az: snap.Volume.Az, Tags: snap.Volume.Tags, VolumeType: snap.Volume.VolumeType}, nil @@ -182,15 +182,15 @@ func getPVCInfo(ctx context.Context, kubeCli kubernetes.Interface, namespace str var provider blockstorage.Provider pvc, err := kubeCli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to get PVC, PVC name: %s, namespace: %s", name, namespace) + return nil, errkit.Wrap(err, "Failed to get PVC, PVC", "name", name, "namespace", namespace) } pvName := pvc.Spec.VolumeName if pvName == "" { - return nil, errors.Errorf("PVC %s in namespace %s not bound", name, namespace) + return nil, errkit.New(fmt.Sprintf("PVC %s in namespace %s not bound", name, namespace)) } pv, err := kubeCli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to get PV %s, namespace: %s", pvName, namespace) + return nil, errkit.Wrap(err, "Failed to get", "PV", pvName, "namespace", namespace) } pvLabels := pv.GetObjectMeta().GetLabels() var size int64 @@ -203,7 +203,7 @@ func getPVCInfo(ctx context.Context, kubeCli kubernetes.Interface, namespace str case pv.Spec.AWSElasticBlockStore != nil: ebs := pv.Spec.AWSElasticBlockStore if err = ValidateLocationForBlockstorage(tp.Profile, blockstorage.TypeEBS); err != nil { - return nil, errors.Wrap(err, "Profile validation failed") + return nil, errkit.Wrap(err, "Profile validation failed") } // Get Region from PV label or EC2 metadata if pvRegion := kube.GetRegionFromLabels(pvLabels); pvRegion != "" { @@ -219,29 +219,29 @@ func getPVCInfo(ctx context.Context, kubeCli kubernetes.Interface, namespace str config[awsconfig.ConfigRegion] = region provider, err = getter.Get(blockstorage.TypeEBS, config) if err != nil { - return nil, errors.Wrap(err, "Could not get storage provider") + return nil, errkit.Wrap(err, "Could not get storage provider") } return &volumeInfo{provider: provider, volumeID: filepath.Base(ebs.VolumeID), sType: blockstorage.TypeEBS, volZone: pvZone, pvc: name, size: size, region: region}, nil } - return nil, errors.Errorf("PV zone label is empty, pvName: %s, namespace: %s", pvName, namespace) + return nil, errkit.New(fmt.Sprintf("PV zone label is empty, pvName: %s, namespace: %s", pvName, namespace)) case pv.Spec.GCEPersistentDisk != nil: gpd := pv.Spec.GCEPersistentDisk region = "" if err = ValidateLocationForBlockstorage(tp.Profile, blockstorage.TypeGPD); err != nil { - return nil, errors.Wrap(err, "Profile validation failed") + return nil, errkit.Wrap(err, "Profile validation failed") } if pvZone := kube.GetZoneFromLabels(pvLabels); pvZone != "" { config := getConfig(tp.Profile, blockstorage.TypeGPD) provider, err = getter.Get(blockstorage.TypeGPD, config) if err != nil { - return nil, errors.Wrap(err, "Could not get storage provider") + return nil, errkit.Wrap(err, "Could not get storage provider") } return &volumeInfo{provider: provider, volumeID: filepath.Base(gpd.PDName), sType: blockstorage.TypeGPD, volZone: pvZone, pvc: name, size: size, region: region}, nil } 
- return nil, errors.Errorf("PV zone label is empty, pvName: %s, namespace: %s", pvName, namespace) + return nil, errkit.New(fmt.Sprintf("PV zone label is empty, pvName: %s, namespace: %s", pvName, namespace)) } - return nil, errors.New("Storage type not supported!") + return nil, errkit.New("Storage type not supported!") } func getPVCList(tp param.TemplateParams) ([]string, error) { @@ -253,7 +253,7 @@ func getPVCList(tp param.TemplateParams) ([]string, error) { case tp.StatefulSet != nil: podsToPvcs = tp.StatefulSet.PersistentVolumeClaims default: - return nil, errors.New("Failed to get volumes") + return nil, errkit.New("Failed to get volumes") } for _, podToPvcs := range podsToPvcs { for pvc := range podToPvcs { @@ -261,7 +261,7 @@ func getPVCList(tp param.TemplateParams) ([]string, error) { } } if len(pvcList) == 0 { - return nil, errors.New("No pvcs found") + return nil, errkit.New("No pvcs found") } return pvcList, nil } @@ -273,7 +273,7 @@ func (c *createVolumeSnapshotFunc) Exec(ctx context.Context, tp param.TemplatePa cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } var namespace string var pvcs []string diff --git a/pkg/function/delete_data.go b/pkg/function/delete_data.go index 4a29094cd5..c437b48b9c 100644 --- a/pkg/function/delete_data.go +++ b/pkg/function/delete_data.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -91,7 +91,7 @@ func deleteData( labels map[string]string, ) (map[string]interface{}, error) { if (len(deleteIdentifiers) == 0) == (len(deleteTags) == 0) { - return nil, errors.Errorf("Require one argument: %s or %s", DeleteDataBackupIdentifierArg, DeleteDataBackupTagArg) + return nil, errkit.New(fmt.Sprintf("Require one argument: %s or %s", DeleteDataBackupIdentifierArg, DeleteDataBackupTagArg)) } options := &kube.PodOptions{ @@ -127,7 +127,7 @@ func deleteDataPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } remover, err := MaybeWriteProfileCredentials(ctx, pc, tp.Profile) @@ -155,11 +155,11 @@ func deleteDataPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to forget data, could not get snapshotID from tag, Tag: %s", deleteTag) + return nil, errkit.Wrap(err, "Failed to forget data, could not get snapshotID from tag", "Tag", deleteTag) } deleteIdentifier, err := restic.SnapshotIDFromSnapshotLog(stdout.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to forget data, could not get snapshotID from tag, Tag: %s", deleteTag) + return nil, errkit.Wrap(err, "Failed to forget data, could not get snapshotID from tag", "Tag", deleteTag) } deleteIdentifiers = append(deleteIdentifiers, deleteIdentifier) } @@ -175,12 +175,12 @@ func deleteDataPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to 
forget data") + return nil, errkit.Wrap(err, "Failed to forget data") } if reclaimSpace { spaceFreedStr, err := pruneData(tp, pod, podCommandExecutor, encryptionKey, targetPaths[i], insecureTLS) if err != nil { - return nil, errors.Wrapf(err, "Error executing prune command") + return nil, errkit.Wrap(err, "Error executing prune command") } spaceFreedTotal += restic.ParseResticSizeStringBytes(spaceFreedStr) } @@ -211,7 +211,7 @@ func pruneData( format.Log(pod.Name, pod.Spec.Containers[0].Name, stderr.String()) spaceFreed := restic.SpaceFreedFromPruneLog(stdout.String()) - return spaceFreed, errors.Wrapf(err, "Failed to prune data after forget") + return spaceFreed, errkit.Wrap(err, "Failed to prune data after forget") } func (d *deleteDataFunc) Exec(ctx context.Context, tp param.TemplateParams, args map[string]interface{}) (map[string]interface{}, error) { @@ -279,7 +279,7 @@ func (d *deleteDataFunc) Exec(ctx context.Context, tp param.TemplateParams, args cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return deleteData( ctx, diff --git a/pkg/function/delete_data_all.go b/pkg/function/delete_data_all.go index 43222c0022..a7df1bb902 100644 --- a/pkg/function/delete_data_all.go +++ b/pkg/function/delete_data_all.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -122,12 +122,12 @@ func (d *deleteDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, a } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } input := make(map[string]BackupInfo) err = json.Unmarshal([]byte(backupInfo), &input) if err != nil { - return nil, errors.Wrapf(err, "Could not decode JSON data") + return nil, errkit.Wrap(err, "Could not decode JSON data") } var targetPaths []string var deleteIdentifiers []string diff --git a/pkg/function/delete_data_using_kopia_server.go b/pkg/function/delete_data_using_kopia_server.go index 081817d0f2..ac66ac8a04 100644 --- a/pkg/function/delete_data_using_kopia_server.go +++ b/pkg/function/delete_data_using_kopia_server.go @@ -19,7 +19,7 @@ import ( "context" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -136,22 +136,22 @@ func (d *deleteDataUsingKopiaServerFunc) Exec(ctx context.Context, tp param.Temp userPassphrase, cert, err := userCredentialsAndServerTLS(&tp) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch User Credentials/Certificate Data from Template Params") + return nil, errkit.Wrap(err, "Failed to fetch User Credentials/Certificate Data from Template Params") } fingerprint, err := kankopia.ExtractFingerprintFromCertificateJSON(cert) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") + return nil, errkit.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") } hostname, userAccessPassphrase, err := hostNameAndUserPassPhraseFromRepoServer(userPassphrase, userHostname) if err != nil { - return nil, errors.Wrap(err, "Failed to get hostname/user passphrase from Options") + return nil, errkit.Wrap(err, "Failed to get hostname/user passphrase from 
Options") } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrap(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return deleteDataFromServer( @@ -231,7 +231,7 @@ func deleteDataFromServerPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } contentCacheMB, metadataCacheMB := kopiacmd.GetCacheSizeSettingsForSnapshot() @@ -254,7 +254,7 @@ func deleteDataFromServerPodFunc( commandExecutor, err := pc.GetCommandExecutor() if err != nil { - return nil, errors.Wrap(err, "Unable to get pod command executor") + return nil, errkit.Wrap(err, "Unable to get pod command executor") } var stdout, stderr bytes.Buffer @@ -262,7 +262,7 @@ func deleteDataFromServerPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrap(err, "Failed to connect to Kopia Repository server") + return nil, errkit.Wrap(err, "Failed to connect to Kopia Repository server") } cmd = kopiacmd.SnapshotDelete( @@ -279,6 +279,6 @@ func deleteDataFromServerPodFunc( err = commandExecutor.Exec(ctx, cmd, nil, &stdout, &stderr) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) - return nil, errors.Wrap(err, "Failed to delete backup from Kopia API server") + return nil, errkit.Wrap(err, "Failed to delete backup from Kopia API server") } } diff --git a/pkg/function/delete_rds_snapshot.go b/pkg/function/delete_rds_snapshot.go index b5aeaca40f..f213a4e4bb 100644 --- a/pkg/function/delete_rds_snapshot.go +++ b/pkg/function/delete_rds_snapshot.go @@ -20,7 +20,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" awsrds "github.com/aws/aws-sdk-go/service/rds" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -58,19 +58,19 @@ func (*deleteRDSSnapshotFunc) Name() string { func deleteRDSSnapshot(ctx context.Context, snapshotID string, profile *param.Profile, dbEngine RDSDBEngine) (map[string]interface{}, error) { // Validate profile if err := ValidateProfile(profile); err != nil { - return nil, errors.Wrap(err, "Profile Validation failed") + return nil, errkit.Wrap(err, "Profile Validation failed") } // Get aws config from profile awsConfig, region, err := getAWSConfigFromProfile(ctx, profile) if err != nil { - return nil, errors.Wrap(err, "Failed to get AWS creds from profile") + return nil, errkit.Wrap(err, "Failed to get AWS creds from profile") } // Create rds client rdsCli, err := rds.NewClient(ctx, awsConfig, region) if err != nil { - return nil, errors.Wrap(err, "Failed to create RDS client") + return nil, errkit.Wrap(err, "Failed to create RDS client") } if !isAuroraCluster(string(dbEngine)) { @@ -84,14 +84,14 @@ func deleteRDSSnapshot(ctx context.Context, snapshotID string, profile *param.Pr log.WithContext(ctx).Print("Could not find matching RDS snapshot; might have been deleted previously", field.M{"SnapshotId": snapshotID}) return nil, nil default: - return nil, errors.Wrap(err, "Failed to delete snapshot") + return nil, errkit.Wrap(err, "Failed to delete snapshot") } } } // Wait 
until snapshot is deleted log.WithContext(ctx).Print("Waiting for RDS snapshot to be deleted", field.M{"SnapshotID": snapshotID}) err = rdsCli.WaitUntilDBSnapshotDeleted(ctx, snapshotID) - return nil, errors.Wrap(err, "Error while waiting for snapshot to be deleted") + return nil, errkit.Wrap(err, "Error while waiting for snapshot to be deleted") } // delete Aurora DB cluster snapshot @@ -104,7 +104,7 @@ func deleteRDSSnapshot(ctx context.Context, snapshotID string, profile *param.Pr log.WithContext(ctx).Print("Could not find matching Aurora DB cluster snapshot; might have been deleted previously", field.M{"SnapshotId": snapshotID}) return nil, nil default: - return nil, errors.Wrap(err, "Error deleting Aurora DB cluster snapshot") + return nil, errkit.Wrap(err, "Error deleting Aurora DB cluster snapshot") } } } @@ -112,7 +112,7 @@ func deleteRDSSnapshot(ctx context.Context, snapshotID string, profile *param.Pr log.WithContext(ctx).Print("Waiting for Aurora DB cluster snapshot to be deleted") err = rdsCli.WaitUntilDBClusterDeleted(ctx, snapshotID) - return nil, errors.Wrap(err, "Error waiting for Aurora DB cluster snapshot to be deleted") + return nil, errkit.Wrap(err, "Error waiting for Aurora DB cluster snapshot to be deleted") } func (d *deleteRDSSnapshotFunc) Exec(ctx context.Context, tp param.TemplateParams, args map[string]interface{}) (map[string]interface{}, error) { diff --git a/pkg/function/delete_volume_snapshot.go b/pkg/function/delete_volume_snapshot.go index 25593c2837..b813082a5b 100644 --- a/pkg/function/delete_volume_snapshot.go +++ b/pkg/function/delete_volume_snapshot.go @@ -20,7 +20,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -65,13 +65,13 @@ func deleteVolumeSnapshot(ctx context.Context, cli kubernetes.Interface, namespa PVCData := []VolumeSnapshotInfo{} err := json.Unmarshal([]byte(snapshotinfo), &PVCData) if err != nil { - return nil, errors.Wrapf(err, "Could not decode JSON data") + return nil, errkit.Wrap(err, "Could not decode JSON data") } // providerList required for unit testing providerList := make(map[string]blockstorage.Provider) for _, pvcInfo := range PVCData { if err = ValidateLocationForBlockstorage(profile, pvcInfo.Type); err != nil { - return nil, errors.Wrap(err, "Profile validation failed") + return nil, errkit.Wrap(err, "Profile validation failed") } config := getConfig(profile, pvcInfo.Type) if pvcInfo.Type == blockstorage.TypeEBS { @@ -80,14 +80,14 @@ func deleteVolumeSnapshot(ctx context.Context, cli kubernetes.Interface, namespa provider, err := getter.Get(pvcInfo.Type, config) if err != nil { - return nil, errors.Wrapf(err, "Could not get storage provider") + return nil, errkit.Wrap(err, "Could not get storage provider") } snapshot, err := provider.SnapshotGet(ctx, pvcInfo.SnapshotID) if err != nil { if strings.Contains(err.Error(), blockstorage.SnapshotDoesNotExistError) { log.WithContext(ctx).Print("Snapshot already deleted", field.M{"SnapshotID": pvcInfo.SnapshotID}) } else { - return nil, errors.Wrapf(err, "Failed to get Snapshot from Provider") + return nil, errkit.Wrap(err, "Failed to get Snapshot from Provider") } } if err = provider.SnapshotDelete(ctx, snapshot); err != nil { @@ -106,7 +106,7 @@ func (d *deleteVolumeSnapshotFunc) Exec(ctx context.Context, tp param.TemplatePa cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, 
errkit.Wrap(err, "Failed to create Kubernetes client") } var namespace, snapshotinfo string if err = Arg(args, DeleteVolumeSnapshotNamespaceArg, &namespace); err != nil { diff --git a/pkg/function/export_rds_snapshot_location.go b/pkg/function/export_rds_snapshot_location.go index caf4861877..a19376bb47 100644 --- a/pkg/function/export_rds_snapshot_location.go +++ b/pkg/function/export_rds_snapshot_location.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/yaml" @@ -102,18 +102,18 @@ func exportRDSSnapshotToLoc( ) (map[string]interface{}, error) { // Validate profilextractDumpFromDBe if err := ValidateProfile(profile); err != nil { - return nil, errors.Wrap(err, "Profile Validation failed") + return nil, errkit.Wrap(err, "Profile Validation failed") } awsConfig, region, err := getAWSConfigFromProfile(ctx, profile) if err != nil { - return nil, errors.Wrap(err, "Failed to get AWS creds from profile") + return nil, errkit.Wrap(err, "Failed to get AWS creds from profile") } // Create rds client rdsCli, err := rds.NewClient(ctx, awsConfig, region) if err != nil { - return nil, errors.Wrap(err, "Failed to create RDS client") + return nil, errkit.Wrap(err, "Failed to create RDS client") } // Create tmp instance from the snapshot @@ -123,14 +123,14 @@ func exportRDSSnapshotToLoc( if sgIDs == nil { sgIDs, err = findSecurityGroups(ctx, rdsCli, instanceID) if err != nil { - return nil, errors.Wrapf(err, "Failed to fetch security group ids. InstanceID=%s", instanceID) + return nil, errkit.Wrap(err, "Failed to fetch security group ids. InstanceID=", "instanceID=", instanceID) } } log.WithContext(ctx).Print("Spin up temporary RDS instance from the snapshot.", field.M{"SnapshotID": snapshotID, "InstanceID": tmpInstanceID}) // Create tmp instance from snapshot if err := restoreFromSnapshot(ctx, rdsCli, tmpInstanceID, dbSubnetGroup, snapshotID, sgIDs); err != nil { - return nil, errors.Wrapf(err, "Failed to restore snapshot. SnapshotID=%s", snapshotID) + return nil, errkit.Wrap(err, "Failed to restore snapshot. SnapshotID=", "snapshotID=", snapshotID) } defer func() { if err := cleanupRDSDB(ctx, rdsCli, tmpInstanceID); err != nil { @@ -141,7 +141,7 @@ func exportRDSSnapshotToLoc( // Find host of the instance dbEndpoint, err := findRDSEndpoint(ctx, rdsCli, tmpInstanceID) if err != nil { - return nil, errors.Wrapf(err, "Couldn't find endpoint for instance %s", tmpInstanceID) + return nil, errkit.Wrap(err, "Couldn't find endpoint for instance", "instance", tmpInstanceID) } // Create unique backupID @@ -150,7 +150,7 @@ func exportRDSSnapshotToLoc( // get the engine version dbEngineVersion, err := rdsDBEngineVersion(ctx, rdsCli, tmpInstanceID) if err != nil { - return nil, errors.Wrapf(err, "Couldn't find DBInstance Version") + return nil, errkit.Wrap(err, "Couldn't find DBInstance Version") } // Extract dump from DB @@ -172,13 +172,13 @@ func exportRDSSnapshotToLoc( labels, ) if err != nil { - return nil, errors.Wrap(err, "Unable to extract and push db dump to location") + return nil, errkit.Wrap(err, "Unable to extract and push db dump to location") } // Convert to yaml format sgIDYaml, err := yaml.Marshal(sgIDs) if err != nil { - return nil, errors.Wrapf(err, "Failed to create securityGroupID artifact. InstanceID=%s", tmpInstanceID) + return nil, errkit.Wrap(err, "Failed to create securityGroupID artifact. 
InstanceID=", "instanceID=", tmpInstanceID) } // Add output artifacts @@ -353,14 +353,14 @@ func execDumpCommand( // Create Kubernetes client cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrap(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } // Create cred secret secretName := fmt.Sprintf("%s-%s", "postgres-secret", rand.String(10)) err = createPostgresSecret(cli, secretName, namespace, username, password) if err != nil { - return nil, errors.Wrap(err, "Unable to create postgres secret") + return nil, errkit.Wrap(err, "Unable to create postgres secret") } defer func() { @@ -408,23 +408,23 @@ func prepareCommand( return command, err } } - return nil, errors.New("Invalid RDSDBEngine or RDSAction") + return nil, errkit.New("Invalid RDSDBEngine or RDSAction") } func findDBList(ctx context.Context, dbEndpoint, username, password string) ([]string, error) { pg, err := postgres.NewClient(dbEndpoint, username, password, postgres.DefaultConnectDatabase, "disable") if err != nil { - return nil, errors.Wrap(err, "Error in creating postgres client") + return nil, errkit.Wrap(err, "Error in creating postgres client") } // Test DB connection if err := pg.PingDB(ctx); err != nil { - return nil, errors.Wrap(err, "Failed to ping postgres database") + return nil, errkit.Wrap(err, "Failed to ping postgres database") } dbList, err := pg.ListDatabases(ctx) if err != nil { - return nil, errors.Wrap(err, "Error while listing databases") + return nil, errkit.Wrap(err, "Error while listing databases") } return filterRestrictedDB(dbList), nil } @@ -432,7 +432,7 @@ func findDBList(ctx context.Context, dbEndpoint, username, password string) ([]s //nolint:unparam func postgresBackupCommand(dbEndpoint, username, password string, dbList []string, backupPrefix, backupID string, profile []byte) ([]string, error) { if len(dbList) == 0 { - return nil, errors.New("No database found to backup") + return nil, errkit.New("No database found to backup") } command := []string{ diff --git a/pkg/function/kube_exec_all.go b/pkg/function/kube_exec_all.go index f726e68675..20f9bfd0ef 100644 --- a/pkg/function/kube_exec_all.go +++ b/pkg/function/kube_exec_all.go @@ -19,7 +19,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -143,7 +143,7 @@ func execAll(ctx context.Context, cli kubernetes.Interface, namespace string, ps } } if len(errs) != 0 { - return nil, errors.New(strings.Join(errs, "\n")) + return nil, errkit.New(strings.Join(errs, "\n")) } out, err := parseLogAndCreateOutput(output) if err != nil { diff --git a/pkg/function/kube_task.go b/pkg/function/kube_task.go index 0dc6c69cbf..c301359033 100644 --- a/pkg/function/kube_task.go +++ b/pkg/function/kube_task.go @@ -19,7 +19,7 @@ import ( "path" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -93,13 +93,13 @@ func kubeTask( func kubeTaskPodFunc() func(ctx context.Context, pc kube.PodController) (map[string]interface{}, error) { return func(ctx context.Context, pc kube.PodController) (map[string]interface{}, error) { if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pc.PodName()) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pc.PodName()) } ctx = field.Context(ctx, 
consts.LogKindKey, consts.LogKindDatapath) // Fetch logs from the pod r, err := pc.StreamPodLogs(ctx) if err != nil { - return nil, errors.Wrapf(err, "Failed to fetch logs from the pod") + return nil, errkit.Wrap(err, "Failed to fetch logs from the pod") } out, err := output.LogAndParse(ctx, r) if err != nil { @@ -107,7 +107,7 @@ func kubeTaskPodFunc() func(ctx context.Context, pc kube.PodController) (map[str } // Wait for pod completion if err := pc.WaitForPodCompletion(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to complete", pc.PodName()) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to complete", "pod", pc.PodName()) } return out, err } @@ -159,7 +159,7 @@ func (ktf *kubeTaskFunc) Exec(ctx context.Context, tp param.TemplateParams, args cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return kubeTask( ctx, diff --git a/pkg/function/kubeops.go b/pkg/function/kubeops.go index bbc82210cb..61dcedbede 100644 --- a/pkg/function/kubeops.go +++ b/pkg/function/kubeops.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic" @@ -109,18 +109,18 @@ func execKubeOperation(ctx context.Context, dynCli dynamic.Interface, op kube.Op switch op { case kube.CreateOperation: if len(spec) == 0 { - return nil, errors.New(fmt.Sprintf("spec cannot be empty for %s operation", kube.CreateOperation)) + return nil, errkit.New(fmt.Sprintf("spec cannot be empty for %s operation", kube.CreateOperation)) } return kubeopsOp.Create(strings.NewReader(spec), namespace) case kube.DeleteOperation: if objRef.Name == "" || objRef.APIVersion == "" || objRef.Resource == "" { - return nil, errors.New(fmt.Sprintf("missing one or more required fields name/namespace/group/apiVersion/resource in objectReference for %s operation", kube.DeleteOperation)) + return nil, errkit.New(fmt.Sprintf("missing one or more required fields name/namespace/group/apiVersion/resource in objectReference for %s operation", kube.DeleteOperation)) } return kubeopsOp.Delete(ctx, objRef, namespace) } - return nil, errors.New(fmt.Sprintf("invalid operation '%s'", op)) + return nil, errkit.New(fmt.Sprintf("invalid operation '%s'", op)) } func (*kubeops) RequiredArgs() []string { diff --git a/pkg/function/location_delete.go b/pkg/function/location_delete.go index fed5eb4ae6..0211bbd806 100644 --- a/pkg/function/location_delete.go +++ b/pkg/function/location_delete.go @@ -19,7 +19,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -62,7 +62,7 @@ func (l *locationDeleteFunc) Exec(ctx context.Context, tp param.TemplateParams, return nil, err } if err = ValidateProfile(tp.Profile); err != nil { - return nil, errors.Wrapf(err, "Failed to validate Profile") + return nil, errkit.Wrap(err, "Failed to validate Profile") } return nil, location.Delete(ctx, *tp.Profile, strings.TrimPrefix(artifact, tp.Profile.Location.Bucket)) } diff --git a/pkg/function/prepare_data.go b/pkg/function/prepare_data.go index e972be4aae..cc1f3b6b05 100644 --- a/pkg/function/prepare_data.go +++ b/pkg/function/prepare_data.go @@ -20,7 +20,7 @@ import ( "io" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -72,7 +72,7 @@ func getVolumes(tp param.TemplateParams) (map[string]string, error) { case tp.StatefulSet != nil: podsToPvcs = tp.StatefulSet.PersistentVolumeClaims default: - return nil, errors.New("Failed to get volumes") + return nil, errkit.New("Failed to get volumes") } for _, podToPvcs := range podsToPvcs { for pvc := range podToPvcs { @@ -80,7 +80,7 @@ func getVolumes(tp param.TemplateParams) (map[string]string, error) { } } if len(vols) == 0 { - return nil, errors.New("No volumes found") + return nil, errkit.New("No volumes found") } return vols, nil } @@ -102,7 +102,7 @@ func prepareData( for pvcName, mountPoint := range vols { pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to retrieve PVC. Namespace %s, Name %s", namespace, pvcName) + return nil, errkit.Wrap(err, "Failed to retrieve PVC.", "namespace", namespace, "name", pvcName) } validatedVols[pvcName] = kube.VolumeMountOptions{ @@ -137,26 +137,26 @@ func prepareDataPodFunc(cli kubernetes.Interface) func(ctx context.Context, pc k // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } ctx = field.Context(ctx, consts.LogKindKey, consts.LogKindDatapath) // Fetch logs from the pod r, err := pc.StreamPodLogs(ctx) if err != nil { - return nil, errors.Wrapf(err, "Failed to fetch logs from the pod") + return nil, errkit.Wrap(err, "Failed to fetch logs from the pod") } defer r.Close() //nolint:errcheck bytes, err := io.ReadAll(r) if err != nil { - return nil, errors.Wrapf(err, "Failed to read logs from the pod") + return nil, errkit.Wrap(err, "Failed to read logs from the pod") } logs := string(bytes) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, logs) out, err := parseLogAndCreateOutput(logs) - return out, errors.Wrap(err, "Failed to parse phase output") + return out, errkit.Wrap(err, "Failed to parse phase output") } } @@ -212,7 +212,7 @@ func (p *prepareDataFunc) Exec(ctx context.Context, tp param.TemplateParams, arg cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } if len(vols) == 0 { if vols, err = getVolumes(tp); err != nil { diff --git a/pkg/function/restore_csi_snapshot.go b/pkg/function/restore_csi_snapshot.go index 0b0b683eb2..81331db3cb 100644 --- a/pkg/function/restore_csi_snapshot.go +++ b/pkg/function/restore_csi_snapshot.go @@ -16,10 +16,10 @@ package function import ( "context" - "errors" "fmt" "time" + "github.com/kanisterio/errkit" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -225,7 +225,7 @@ func validateVolumeModeArg(volumeMode corev1.PersistentVolumeMode) error { case corev1.PersistentVolumeFilesystem, corev1.PersistentVolumeBlock: default: - return errors.New("Given volumeMode " + string(volumeMode) + " is invalid") + return errkit.New("Given volumeMode " + string(volumeMode) + " is invalid") } return nil } @@ -237,7 +237,7 @@ func validateVolumeAccessModesArg(accessModes []corev1.PersistentVolumeAccessMod corev1.ReadWriteMany, corev1.ReadWriteOnce: default: - return errors.New("Given accessMode " + 
string(accessModeInArg) + " is invalid") + return errkit.New("Given accessMode " + string(accessModeInArg) + " is invalid") } } return nil diff --git a/pkg/function/restore_data.go b/pkg/function/restore_data.go index 976ad77d34..3da480fad5 100644 --- a/pkg/function/restore_data.go +++ b/pkg/function/restore_data.go @@ -17,9 +17,10 @@ package function import ( "bytes" "context" + "fmt" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -95,7 +96,7 @@ func validateAndGetOptArgs(args map[string]interface{}, tp param.TemplateParams) } if (pod != "") == (len(vols) > 0) { return restorePath, encryptionKey, pod, vols, tag, id, insecureTLS, podOverride, - errors.Errorf("Require one argument: %s or %s", RestoreDataPodArg, RestoreDataVolsArg) + errkit.New(fmt.Sprintf("Require one argument: %s or %s", RestoreDataPodArg, RestoreDataVolsArg)) } if err = OptArg(args, RestoreDataBackupTagArg, &tag, nil); err != nil { return restorePath, encryptionKey, pod, vols, tag, id, insecureTLS, podOverride, err @@ -108,7 +109,7 @@ func validateAndGetOptArgs(args map[string]interface{}, tp param.TemplateParams) } if (tag != "") == (id != "") { return restorePath, encryptionKey, pod, vols, tag, id, insecureTLS, podOverride, - errors.Errorf("Require one argument: %s or %s", RestoreDataBackupTagArg, RestoreDataBackupIdentifierArg) + errkit.New(fmt.Sprintf("Require one argument: %s or %s", RestoreDataBackupTagArg, RestoreDataBackupIdentifierArg)) } podOverride, err = GetPodSpecOverride(tp, args, RestoreDataPodOverrideArg) if err != nil { @@ -141,7 +142,7 @@ func restoreData( for pvcName, mountPoint := range vols { pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to retrieve PVC. 
Namespace %s, Name %s", namespace, pvcName) + return nil, errkit.Wrap(err, "Failed to retrieve PVC.", "namespace", namespace, "name", pvcName) } validatedVols[pvcName] = kube.VolumeMountOptions{ @@ -183,12 +184,12 @@ func restoreDataPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } remover, err := MaybeWriteProfileCredentials(ctx, pc, tp.Profile) if err != nil { - return nil, errors.Wrapf(err, "Failed to write credentials to Pod %s", pc.PodName()) + return nil, errkit.Wrap(err, "Failed to write credentials to Pod", "pod", pc.PodName()) } // Parent context could already be dead, so removing file within new context @@ -214,10 +215,10 @@ func restoreDataPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to restore backup") + return nil, errkit.Wrap(err, "Failed to restore backup") } out, err := parseLogAndCreateOutput(stdout.String()) - return out, errors.Wrap(err, "Failed to parse phase output") + return out, errkit.Wrap(err, "Failed to parse phase output") } } @@ -293,7 +294,7 @@ func (r *restoreDataFunc) Exec(ctx context.Context, tp param.TemplateParams, arg } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } return restoreData( ctx, diff --git a/pkg/function/restore_data_all.go b/pkg/function/restore_data_all.go index e698cd6f80..908c9de8a6 100644 --- a/pkg/function/restore_data_all.go +++ b/pkg/function/restore_data_all.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -102,7 +102,7 @@ func validateAndGetRestoreAllOptArgs(args map[string]interface{}, tp param.Templ case tp.StatefulSet != nil: ps = tp.StatefulSet.Pods default: - return restorePath, encryptionKey, ps, insecureTLS, podOverride, errors.New("Unsupported workload type") + return restorePath, encryptionKey, ps, insecureTLS, podOverride, errkit.New("Unsupported workload type") } } @@ -162,12 +162,12 @@ func (r *restoreDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } input := make(map[string]BackupInfo) err = json.Unmarshal([]byte(backupInfo), &input) if err != nil { - return nil, errors.Wrapf(err, "Could not decode JSON data") + return nil, errkit.Wrap(err, "Could not decode JSON data") } var chanLen = len(pods) errChan := make(chan error, chanLen) @@ -178,7 +178,7 @@ func (r *restoreDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, vols, err := FetchPodVolumes(pod, tp) var out map[string]interface{} if err != nil { - errChan <- errors.Wrapf(err, "Failed to get volumes of pod %s", pod) + errChan <- errkit.Wrap(err, "Failed to get volumes of pod", "pod", pod) outputChan <- out return } @@ -200,7 +200,7 @@ func (r *restoreDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, annotations, labels, ) - errChan <- errors.Wrapf(err, 
"Failed to restore data for pod %s", pod) + errChan <- errkit.Wrap(err, "Failed to restore data for pod", "pod", pod) outputChan <- out }(pod) } @@ -217,7 +217,7 @@ func (r *restoreDataAllFunc) Exec(ctx context.Context, tp param.TemplateParams, } } if len(errs) != 0 { - return nil, errors.New(strings.Join(errs, "\n")) + return nil, errkit.New(strings.Join(errs, "\n")) } return output, nil } diff --git a/pkg/function/restore_data_using_kopia_server.go b/pkg/function/restore_data_using_kopia_server.go index c1be2960b2..8d9b2853f0 100644 --- a/pkg/function/restore_data_using_kopia_server.go +++ b/pkg/function/restore_data_using_kopia_server.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -145,12 +145,12 @@ func (r *restoreDataUsingKopiaServerFunc) Exec(ctx context.Context, tp param.Tem userPassphrase, cert, err := userCredentialsAndServerTLS(&tp) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch User Credentials/Certificate Data from Template Params") + return nil, errkit.Wrap(err, "Failed to fetch User Credentials/Certificate Data from Template Params") } fingerprint, err := kankopia.ExtractFingerprintFromCertificateJSON(cert) if err != nil { - return nil, errors.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") + return nil, errkit.Wrap(err, "Failed to fetch Kopia API Server Certificate Secret Data from Certificate") } // Validate and get optional arguments @@ -168,12 +168,12 @@ func (r *restoreDataUsingKopiaServerFunc) Exec(ctx context.Context, tp param.Tem hostname, userAccessPassphrase, err := hostNameAndUserPassPhraseFromRepoServer(userPassphrase, userHostname) if err != nil { - return nil, errors.Wrap(err, "Failed to get hostname/user passphrase from Options") + return nil, errkit.Wrap(err, "Failed to get hostname/user passphrase from Options") } cli, err := kube.NewClient() if err != nil { - return nil, errors.Wrap(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } _, sparseRestore := tp.Options[SparseRestoreOption] @@ -231,7 +231,7 @@ func restoreDataFromServer( for pvcName, mountPoint := range vols { pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "Failed to retrieve PVC. 
Namespace %s, Name %s", namespace, pvcName) + return nil, errkit.Wrap(err, "Failed to retrieve PVC.", "namespace", namespace, "name", pvcName) } validatedVols[pvcName] = kube.VolumeMountOptions{ @@ -283,7 +283,7 @@ func restoreDataFromServerPodFunc( // Wait for pod to reach running state if err := pc.WaitForPodReady(ctx); err != nil { - return nil, errors.Wrapf(err, "Failed while waiting for Pod %s to be ready", pod.Name) + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pod.Name) } contentCacheMB, metadataCacheMB := kopiacmd.GetCacheSizeSettingsForSnapshot() @@ -307,7 +307,7 @@ func restoreDataFromServerPodFunc( commandExecutor, err := pc.GetCommandExecutor() if err != nil { - return nil, errors.Wrap(err, "Unable to get pod command executor") + return nil, errkit.Wrap(err, "Unable to get pod command executor") } var stdout, stderr bytes.Buffer @@ -315,7 +315,7 @@ func restoreDataFromServerPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) if err != nil { - return nil, errors.Wrapf(err, "Failed to connect to Kopia Repository server") + return nil, errkit.Wrap(err, "Failed to connect to Kopia Repository server") } cmd = kopiacmd.SnapshotRestore( @@ -338,7 +338,7 @@ func restoreDataFromServerPodFunc( format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stdout.String()) format.LogWithCtx(ctx, pod.Name, pod.Spec.Containers[0].Name, stderr.String()) - return nil, errors.Wrap(err, "Failed to restore backup from Kopia API server") + return nil, errkit.Wrap(err, "Failed to restore backup from Kopia API server") } } @@ -350,11 +350,11 @@ func validateAndGetOptArgsForRestore(tp param.TemplateParams, args map[string]an return pod, vols, podOverride, err } if (pod != "") && (len(vols) > 0) { - return pod, vols, podOverride, errors.New(fmt.Sprintf("Exactly one of the %s or %s arguments are required, but both are provided", RestoreDataPodArg, RestoreDataVolsArg)) + return pod, vols, podOverride, errkit.New(fmt.Sprintf("Exactly one of the %s or %s arguments are required, but both are provided", RestoreDataPodArg, RestoreDataVolsArg)) } podOverride, err = GetPodSpecOverride(tp, args, RestoreDataPodOverrideArg) if err != nil { - return pod, vols, podOverride, errors.Wrap(err, "Failed to get Pod Override Specs") + return pod, vols, podOverride, errkit.Wrap(err, "Failed to get Pod Override Specs") } return pod, vols, podOverride, nil } diff --git a/pkg/function/restore_rds_snapshot.go b/pkg/function/restore_rds_snapshot.go index 3f1c25b828..df5dd16957 100644 --- a/pkg/function/restore_rds_snapshot.go +++ b/pkg/function/restore_rds_snapshot.go @@ -23,7 +23,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" rdserr "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/go-version" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -241,18 +241,18 @@ func restoreRDSSnapshot( ) (map[string]interface{}, error) { // Validate profile if err := ValidateProfile(profile); err != nil { - return nil, errors.Wrap(err, "Error validating profile") + return nil, errkit.Wrap(err, "Error validating profile") } awsConfig, region, err := getAWSConfigFromProfile(ctx, profile) if err != nil { - return nil, errors.Wrap(err, "Failed to get AWS creds from profile") + return nil, errkit.Wrap(err, "Failed to get AWS creds from profile") } // Create rds client rdsCli, err 
:= rds.NewClient(ctx, awsConfig, region) if err != nil { - return nil, errors.Wrap(err, "Failed to create RDS client") + return nil, errkit.Wrap(err, "Failed to create RDS client") } // Restore from snapshot @@ -262,7 +262,7 @@ func restoreRDSSnapshot( if sgIDs == nil { sgIDs, err = findSecurityGroupIDs(ctx, rdsCli, instanceID, string(dbEngine)) if err != nil { - return nil, errors.Wrapf(err, "Failed to fetch security group ids. InstanceID=%s", instanceID) + return nil, errkit.Wrap(err, "Failed to fetch security group ids", "instanceID", instanceID) } } if !isAuroraCluster(string(dbEngine)) { @@ -274,7 +274,7 @@ func restoreRDSSnapshot( // Restore from dump descOp, err := rdsCli.DescribeDBInstances(ctx, instanceID) if err != nil { - return nil, errors.Wrapf(err, "Failed to describe DB instance. InstanceID=%s", instanceID) + return nil, errkit.Wrap(err, "Failed to describe DB instance", "instanceID", instanceID) } dbEndpoint := *descOp.DBInstances[0].Endpoint.Address @@ -282,7 +282,7 @@ func restoreRDSSnapshot( // get the engine version dbEngineVersion, err := rdsDBEngineVersion(ctx, rdsCli, instanceID) if err != nil { - return nil, errors.Wrapf(err, "Couldn't find DBInstance Version") + return nil, errkit.Wrap(err, "Couldn't find DBInstance Version") } if _, err = execDumpCommand( @@ -302,7 +302,7 @@ func restoreRDSSnapshot( annotations, labels, ); err != nil { - return nil, errors.Wrapf(err, "Failed to restore RDS from dump. InstanceID=%s", instanceID) + return nil, errkit.Wrap(err, "Failed to restore RDS from dump", "instanceID", instanceID) } return map[string]interface{}{ @@ -323,12 +323,12 @@ func postgresRestoreCommand(pgHost, username, password string, backupArtifactPre // check if PostgresDB version < 13 v1, err := version.NewVersion(dbEngineVersion) if err != nil { - return nil, errors.Wrapf(err, "Couldn't find DBInstance Version") + return nil, errkit.Wrap(err, "Couldn't find DBInstance Version") } // Add Constraints constraints, err := version.NewConstraint("< " + RDSPostgresDBInstanceEngineVersion) if err != nil { - return nil, errors.Wrapf(err, "Couldn't add constraint to DBInstance Version") + return nil, errkit.Wrap(err, "Couldn't add constraint to DBInstance Version") } // Verify Constraints if constraints.Check(v1) { @@ -362,20 +362,20 @@ func restoreFromSnapshot(ctx context.Context, rdsCli *rds.RDS, instanceID, subne log.WithContext(ctx).Print("Waiting for RDS DB instance to be deleted.", field.M{"instanceID": instanceID}) // Wait for the instance to be deleted if err := rdsCli.WaitUntilDBInstanceDeleted(ctx, instanceID); err != nil { - return errors.Wrapf(err, "Error while waiting RDS DB instance to be deleted") + return errkit.Wrap(err, "Error while waiting RDS DB instance to be deleted") } } log.WithContext(ctx).Print("Restoring RDS DB instance from snapshot.", field.M{"instanceID": instanceID, "snapshotID": snapshotID}) // Restore from snapshot if _, err := rdsCli.RestoreDBInstanceFromDBSnapshot(ctx, instanceID, subnetGroup, snapshotID, securityGrpIDs); err != nil { - return errors.Wrapf(err, "Error restoring RDS DB instance from snapshot") + return errkit.Wrap(err, "Error restoring RDS DB instance from snapshot") } // Wait for instance to be ready log.WithContext(ctx).Print("Waiting for RDS DB instance database to be ready.", field.M{"instanceID": instanceID}) err := rdsCli.WaitUntilDBInstanceAvailable(ctx, instanceID) - return errors.Wrap(err, "Error while waiting for new rds instance to be ready.") + return errkit.Wrap(err, 
"Error while waiting for new rds instance to be ready.") } func restoreAuroraFromSnapshot(ctx context.Context, rdsCli *rds.RDS, instanceID, subnetGroup, snapshotID, dbEngine string, securityGroupIDs []string) error { @@ -398,20 +398,20 @@ func restoreAuroraFromSnapshot(ctx context.Context, rdsCli *rds.RDS, instanceID, version, err := engineVersion(ctx, rdsCli, snapshotID) if err != nil { - return errors.Wrap(err, "Error getting the engine version before restore") + return errkit.Wrap(err, "Error getting the engine version before restore") } log.WithContext(ctx).Print("Restoring RDS Aurora DB Cluster from snapshot.", field.M{"instanceID": instanceID, "snapshotID": snapshotID}) op, err := rdsCli.RestoreDBClusterFromDBSnapshot(ctx, instanceID, subnetGroup, snapshotID, dbEngine, version, securityGroupIDs) if err != nil { - return errors.Wrap(err, "Error restorig aurora db cluster from snapshot") + return errkit.Wrap(err, "Error restorig aurora db cluster from snapshot") } // From docs: Above action only restores the DB cluster, not the DB instances for that DB cluster // wait for db cluster to be available log.WithContext(ctx).Print("Waiting for db cluster to be available") if err := rdsCli.WaitUntilDBClusterAvailable(ctx, *op.DBCluster.DBClusterIdentifier); err != nil { - return errors.Wrap(err, "Error waiting for DBCluster to be available") + return errkit.Wrap(err, "Error waiting for DBCluster to be available") } log.WithContext(ctx).Print("Creating DB instance in the cluster") @@ -430,12 +430,12 @@ func restoreAuroraFromSnapshot(ctx context.Context, rdsCli *rds.RDS, instanceID, subnetGroup, ) if err != nil { - return errors.Wrap(err, "Error while creating Aurora DB instance in the cluster.") + return errkit.Wrap(err, "Error while creating Aurora DB instance in the cluster.") } // wait for instance to be up and running log.WithContext(ctx).Print("Waiting for RDS Aurora instance to be ready.", field.M{"instanceID": instanceID}) if err = rdsCli.WaitUntilDBInstanceAvailable(ctx, *dbInsOp.DBInstance.DBInstanceIdentifier); err != nil { - return errors.Wrap(err, "Error while waiting for new RDS Aurora instance to be ready.") + return errkit.Wrap(err, "Error while waiting for new RDS Aurora instance to be ready.") } return nil } @@ -451,7 +451,7 @@ func DeleteAuroraDBCluster(ctx context.Context, rdsCli *rds.RDS, descOp *rdserr. } else { log.WithContext(ctx).Print("Waiting for RDS Aurora cluster instance to be deleted", field.M{"instance": k}) if err := rdsCli.WaitUntilDBInstanceDeleted(ctx, *member.DBInstanceIdentifier); err != nil { - return errors.Wrapf(err, "Error while waiting for RDS Aurora DB instance to be deleted") + return errkit.Wrap(err, "Error while waiting for RDS Aurora DB instance to be deleted") } } } @@ -466,7 +466,7 @@ func DeleteAuroraDBCluster(ctx context.Context, rdsCli *rds.RDS, descOp *rdserr. 
} else { log.WithContext(ctx).Print("Waiting for RDS Aurora cluster to be deleted.", field.M{"instanceID": instanceID}) if err := rdsCli.WaitUntilDBClusterDeleted(ctx, instanceID); err != nil { - return errors.Wrapf(err, "Error while waiting RDS Aurora DB cluster to be deleted") + return errkit.Wrap(err, "Error while waiting RDS Aurora DB cluster to be deleted") } } return nil diff --git a/pkg/function/scale_workload.go b/pkg/function/scale_workload.go index 5f8db1a7d6..419c056786 100644 --- a/pkg/function/scale_workload.go +++ b/pkg/function/scale_workload.go @@ -16,12 +16,13 @@ package function import ( "context" + "fmt" "strconv" "strings" "time" + "github.com/kanisterio/errkit" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -78,11 +79,11 @@ func (s *scaleWorkloadFunc) Exec(ctx context.Context, tp param.TemplateParams, a cfg, err := kube.LoadConfig() if err != nil { - return nil, errors.Wrapf(err, "Failed to load Kubernetes config") + return nil, errkit.Wrap(err, "Failed to load Kubernetes config") } cli, err := kubernetes.NewForConfig(cfg) if err != nil { - return nil, errors.Wrapf(err, "Failed to create Kubernetes client") + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") } switch strings.ToLower(s.kind) { case param.StatefulSetKind: @@ -104,7 +105,7 @@ func (s *scaleWorkloadFunc) Exec(ctx context.Context, tp param.TemplateParams, a case param.DeploymentConfigKind: osCli, err := osversioned.NewForConfig(cfg) if err != nil { - return nil, errors.Wrapf(err, "Failed to create OpenShift client") + return nil, errkit.Wrap(err, "Failed to create OpenShift client") } count, err := kube.DeploymentConfigReplicas(ctx, osCli, s.namespace, s.name) if err != nil { @@ -114,7 +115,7 @@ func (s *scaleWorkloadFunc) Exec(ctx context.Context, tp param.TemplateParams, a outputArtifactOriginalReplicaCount: count, }, kube.ScaleDeploymentConfig(ctx, cli, osCli, s.namespace, s.name, s.replicas, s.waitForReady) } - return nil, errors.New("Workload type not supported " + s.kind) + return nil, errkit.New("Workload type not supported " + s.kind) } func (*scaleWorkloadFunc) RequiredArgs() []string { @@ -168,11 +169,11 @@ func (s *scaleWorkloadFunc) setArgs(tp param.TemplateParams, args map[string]int case string: var v int if v, err = strconv.Atoi(val); err != nil { - return errors.Wrapf(err, "Cannot convert %s to int ", val) + return errkit.Wrap(err, fmt.Sprintf("Cannot convert %s to int", val)) } replicas = int32(v) default: - return errors.Errorf("Invalid arg type %T for Arg %s ", rep, ScaleWorkloadReplicas) + return errkit.New(fmt.Sprintf("Invalid arg type %T for Arg %s ", rep, ScaleWorkloadReplicas)) } // Populate default values for optional arguments from template parameters switch { @@ -190,7 +191,7 @@ func (s *scaleWorkloadFunc) setArgs(tp param.TemplateParams, args map[string]int namespace = tp.DeploymentConfig.Namespace default: if !ArgExists(args, ScaleWorkloadNamespaceArg) || !ArgExists(args, ScaleWorkloadNameArg) || !ArgExists(args, ScaleWorkloadKindArg) { - return errors.New("Workload information not available via defaults or namespace/name/kind parameters") + return errkit.New("Workload information not available via defaults or namespace/name/kind parameters") } } diff --git a/pkg/function/utils.go b/pkg/function/utils.go index 611bec8e7b..2d9ae1f7ec 100644 --- a/pkg/function/utils.go +++ b/pkg/function/utils.go @@ -11,7 +11,6 @@ import ( 
"github.com/aws/aws-sdk-go/aws/awserr" rdserr "github.com/aws/aws-sdk-go/service/rds" "github.com/kanisterio/errkit" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" @@ -45,48 +44,48 @@ const ( // ValidateCredentials verifies if the given credentials have appropriate values set func ValidateCredentials(creds *param.Credential) error { if creds == nil { - return errors.New("Empty credentials") + return errkit.New("Empty credentials") } switch creds.Type { case param.CredentialTypeKeyPair: if creds.KeyPair == nil { - return errors.New("Empty KeyPair field") + return errkit.New("Empty KeyPair field") } if len(creds.KeyPair.ID) == 0 { - return errors.New("Access key ID is not set") + return errkit.New("Access key ID is not set") } if len(creds.KeyPair.Secret) == 0 { - return errors.New("Secret access key is not set") + return errkit.New("Secret access key is not set") } return nil case param.CredentialTypeSecret: return secrets.ValidateCredentials(creds.Secret) case param.CredentialTypeKopia: if creds.KopiaServerSecret == nil { - return errors.New("Empty KopiaServerSecret field") + return errkit.New("Empty KopiaServerSecret field") } if len(creds.KopiaServerSecret.Username) == 0 { - return errors.New("Kopia Username is not set") + return errkit.New("Kopia Username is not set") } if len(creds.KopiaServerSecret.Password) == 0 { - return errors.New("Kopia UserPassphrase is not set") + return errkit.New("Kopia UserPassphrase is not set") } if len(creds.KopiaServerSecret.Hostname) == 0 { - return errors.New("Kopia Hostname is not set") + return errkit.New("Kopia Hostname is not set") } if len(creds.KopiaServerSecret.Cert) == 0 { - return errors.New("Kopia TLSCert is not set") + return errkit.New("Kopia TLSCert is not set") } return nil default: - return errors.Errorf("Unsupported type '%s' for credentials", creds.Type) + return errkit.New(fmt.Sprintf("Unsupported type '%s' for credentials", creds.Type)) } } // ValidateProfile verifies if the given profile has valid creds and location type func ValidateProfile(profile *param.Profile) error { if profile == nil { - return errors.New("Profile must be non-nil") + return errkit.New("Profile must be non-nil") } if err := ValidateCredentials(&profile.Credential); err != nil { return err @@ -97,7 +96,7 @@ func ValidateProfile(profile *param.Profile) error { case crv1alpha1.LocationTypeAzure: case crv1alpha1.LocationTypeKopia: default: - return errors.New("Location type not supported") + return errkit.New("Location type not supported") } return nil } @@ -120,12 +119,12 @@ func MaybeWriteProfileCredentials(ctx context.Context, pc kube.PodController, pr if profile.Location.Type == crv1alpha1.LocationTypeGCS { pfw, err := pc.GetFileWriter() if err != nil { - return nil, errors.Wrap(err, "Unable to write Google credentials") + return nil, errkit.Wrap(err, "Unable to write Google credentials") } remover, err := pfw.Write(ctx, consts.GoogleCloudCredsFilePath, bytes.NewBufferString(profile.Credential.KeyPair.Secret)) if err != nil { - return nil, errors.Wrap(err, "Unable to write Google credentials") + return nil, errkit.Wrap(err, "Unable to write Google credentials") } return remover, nil @@ -162,14 +161,14 @@ func FetchPodVolumes(pod string, tp param.TemplateParams) (map[string]string, er if pvcToMountPath, ok := tp.Deployment.PersistentVolumeClaims[pod]; ok { return pvcToMountPath, nil } - return nil, errors.New("Failed to find volumes for the Pod: " + pod) + return nil, 
errkit.New("Failed to find volumes for the Pod: " + pod) case tp.StatefulSet != nil: if pvcToMountPath, ok := tp.StatefulSet.PersistentVolumeClaims[pod]; ok { return pvcToMountPath, nil } - return nil, errors.New("Failed to find volumes for the Pod: " + pod) + return nil, errkit.New("Failed to find volumes for the Pod: " + pod) default: - return nil, errors.New("Invalid Template Params") + return nil, errkit.New("Invalid Template Params") } } @@ -238,7 +237,7 @@ func findRDSEndpoint(ctx context.Context, rdsCli *rds.RDS, instanceID string) (s } if (len(dbInstance.DBInstances) == 0) || (dbInstance.DBInstances[0].Endpoint == nil) { - return "", errors.Errorf("Received nil endpoint") + return "", errkit.New("Received nil endpoint") } return *dbInstance.DBInstances[0].Endpoint.Address, nil } @@ -251,7 +250,7 @@ func rdsDBEngineVersion(ctx context.Context, rdsCli *rds.RDS, instanceID string) } if (len(dbInstance.DBInstances) == 0) || (dbInstance.DBInstances[0].EngineVersion == nil) { - return "", errors.Errorf("DB Instance's Engine version is nil") + return "", errkit.New("DB Instance's Engine version is nil") } return *dbInstance.DBInstances[0].EngineVersion, nil @@ -290,7 +289,7 @@ func GetRDSDBSubnetGroup(ctx context.Context, rdsCli *rds.RDS, instanceID string return nil, err } if len(result.DBInstances) == 0 { - return nil, errors.Errorf("Could not get DBInstance with the instanceID %s", instanceID) + return nil, errkit.New(fmt.Sprintf("Could not get DBInstance with the instanceID %s", instanceID)) } return result.DBInstances[0].DBSubnetGroup.DBSubnetGroupName, nil } @@ -306,7 +305,7 @@ func GetRDSAuroraDBSubnetGroup(ctx context.Context, rdsCli *rds.RDS, instanceID } } if len(desc.DBClusters) == 0 { - return nil, errors.Errorf("Could not get DBCluster with the instanceID %s", instanceID) + return nil, errkit.New(fmt.Sprintf("Could not get DBCluster with the instanceID %s", instanceID)) } return desc.DBClusters[0].DBSubnetGroup, nil } @@ -378,11 +377,11 @@ func PodAnnotationsFromFunctionArgs(args map[string]any) (map[string]string, err func ValidateLabels(labels map[string]string) error { for k, v := range labels { if errs := validation.IsQualifiedName(k); len(errs) > 0 { - return errors.New(fmt.Sprintf("label key '%s' failed validation. %s", k, errs)) + return errkit.New(fmt.Sprintf("label key '%s' failed validation. %s", k, errs)) } if errs := validation.IsValidLabelValue(v); len(errs) > 0 { - return errors.New(fmt.Sprintf("label value '%s' failed validation. %s", v, errs)) + return errkit.New(fmt.Sprintf("label value '%s' failed validation. %s", v, errs)) } } return nil @@ -391,7 +390,7 @@ func ValidateLabels(labels map[string]string) error { func ValidateAnnotations(annotations map[string]string) error { for k := range annotations { if errs := validation.IsQualifiedName(k); len(errs) > 0 { - return errors.New(fmt.Sprintf("annotation key '%s' failed validation. %s", k, errs)) + return errkit.New(fmt.Sprintf("annotation key '%s' failed validation. 
%s", k, errs)) } } // annotation values don't actually have a strict format diff --git a/pkg/function/wait.go b/pkg/function/wait.go index 2b0eae290a..f0f6ae5e8c 100644 --- a/pkg/function/wait.go +++ b/pkg/function/wait.go @@ -22,7 +22,7 @@ import ( "text/template" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -100,7 +100,7 @@ func (w *waitFunc) Exec(ctx context.Context, tp param.TemplateParams, args map[s } timeoutDur, err := time.ParseDuration(timeout) if err != nil { - return nil, errors.Wrap(err, "Failed to parse timeout") + return nil, errkit.Wrap(err, "Failed to parse timeout") } err = waitForCondition(ctx, dynCli, conditions, timeoutDur, tp, evaluateWaitCondition) return nil, err @@ -175,9 +175,9 @@ func waitForCondition( } return result, nil }) - err = errors.Wrap(err, "Failed to wait for the condition to be met") + err = errkit.Wrap(err, "Failed to wait for the condition to be met") if evalErr != nil { - return errors.Wrap(err, evalErr.Error()) + return errkit.Wrap(err, evalErr.Error()) } return err } @@ -200,11 +200,11 @@ func evaluateWaitCondition(ctx context.Context, dynCli dynamic.Interface, cond C log.Debug().Print(fmt.Sprintf("Resolved jsonpath: %s", rcondition)) t, err := template.New("config").Option("missingkey=zero").Funcs(ksprig.TxtFuncMap()).Parse(rcondition) if err != nil { - return false, errors.WithStack(err) + return false, errkit.WithStack(err) } buf := bytes.NewBuffer(nil) if err = t.Execute(buf, nil); err != nil { - return false, errors.WithStack(err) + return false, errkit.WithStack(err) } log.Debug().Print(fmt.Sprintf("Condition evaluation result: %s", strings.TrimSpace(buf.String()))) return strings.TrimSpace(buf.String()) == "true", nil diff --git a/pkg/function/wait_for_snapshot_completion.go b/pkg/function/wait_for_snapshot_completion.go index 766f645e9d..7117f60830 100644 --- a/pkg/function/wait_for_snapshot_completion.go +++ b/pkg/function/wait_for_snapshot_completion.go @@ -19,7 +19,7 @@ import ( "encoding/json" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kanister "github.com/kanisterio/kanister/pkg" @@ -94,12 +94,12 @@ func waitForSnapshotsCompletion(ctx context.Context, snapshotinfo string, profil PVCData := []VolumeSnapshotInfo{} err := json.Unmarshal([]byte(snapshotinfo), &PVCData) if err != nil { - return errors.Wrapf(err, "Could not decode JSON data") + return errkit.Wrap(err, "Could not decode JSON data") } for _, pvcInfo := range PVCData { if err = ValidateLocationForBlockstorage(profile, pvcInfo.Type); err != nil { - return errors.Wrap(err, "Profile validation failed") + return errkit.Wrap(err, "Profile validation failed") } config := getConfig(profile, pvcInfo.Type) if pvcInfo.Type == blockstorage.TypeEBS { @@ -108,14 +108,14 @@ func waitForSnapshotsCompletion(ctx context.Context, snapshotinfo string, profil provider, err := getter.Get(pvcInfo.Type, config) if err != nil { - return errors.Wrapf(err, "Could not get storage provider %v", pvcInfo.Type) + return errkit.Wrap(err, "Could not get storage provider", "provider", pvcInfo.Type) } snapshot, err := provider.SnapshotGet(ctx, pvcInfo.SnapshotID) if err != nil { - return errors.Wrapf(err, "Failed to get Snapshot from Provider") + return errkit.Wrap(err, "Failed to get Snapshot from Provider") } if err = provider.SnapshotCreateWaitForCompletion(ctx, snapshot); err != nil { - return 
errors.Wrap(err, "Snapshot creation did not complete "+snapshot.ID) + return errkit.Wrap(err, "Snapshot creation did not complete "+snapshot.ID) } } return nil diff --git a/pkg/function/waitv2.go b/pkg/function/waitv2.go index 2479bb574e..2c65ad25b8 100644 --- a/pkg/function/waitv2.go +++ b/pkg/function/waitv2.go @@ -20,7 +20,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/kanisterio/errkit" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/printers" @@ -78,7 +78,7 @@ func (w *waitV2Func) Exec(ctx context.Context, tp param.TemplateParams, args map } timeoutDur, err := time.ParseDuration(timeout) if err != nil { - return nil, errors.Wrap(err, "Failed to parse timeout") + return nil, errkit.Wrap(err, "Failed to parse timeout") } err = waitForCondition(ctx, dynCli, conditions, timeoutDur, tp, evaluateWaitV2Condition) return nil, err