diff --git a/artifactory/commands/buildinfo/adddependencies.go b/artifactory/commands/buildinfo/adddependencies.go index 1754a546c..c4ce550a4 100644 --- a/artifactory/commands/buildinfo/adddependencies.go +++ b/artifactory/commands/buildinfo/adddependencies.go @@ -262,7 +262,7 @@ func collectPatternMatchingFiles(addDepsParams *specutils.CommonParams, rootPath return nil, err } - paths, err := fspatterns.ListFiles(rootPath, addDepsParams.IsRecursive(), addDepsParams.IsIncludeDirs(), true, excludePathPattern) + paths, err := fspatterns.ListFiles(rootPath, addDepsParams.IsRecursive(), addDepsParams.IsIncludeDirs(), false, true, excludePathPattern) if err != nil { return nil, err } diff --git a/artifactory/commands/transferconfig/transferconfig.go b/artifactory/commands/transferconfig/transferconfig.go index 796e99c86..ddc6ce01c 100644 --- a/artifactory/commands/transferconfig/transferconfig.go +++ b/artifactory/commands/transferconfig/transferconfig.go @@ -14,6 +14,7 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/generic" "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferconfig/configxmlutils" commandsUtils "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils/precheckrunner" "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" "github.com/jfrog/jfrog-cli-core/v2/common/commands" "github.com/jfrog/jfrog-cli-core/v2/utils/config" @@ -212,7 +213,7 @@ func (tcc *TransferConfigCommand) runPreChecks() error { return err } - return tcc.NewPreChecksRunner(remoteRepositories).Run(context.Background(), tcc.TargetServerDetails) + return tcc.NewPreChecksRunner(selectedRepos, remoteRepositories).Run(context.Background(), tcc.TargetServerDetails) } func (tcc *TransferConfigCommand) printWarnings() (err error) { @@ -296,16 +297,17 @@ func (tcc *TransferConfigCommand) verifyConfigImportPlugin() error { } // Creates the Pre-checks runner for the config import command 
-func (tcc *TransferConfigCommand) NewPreChecksRunner(remoteRepositories []interface{}) (runner *commandsUtils.PreCheckRunner) { - runner = commandsUtils.NewPreChecksRunner() +func (tcc *TransferConfigCommand) NewPreChecksRunner(selectedRepos map[utils.RepoType][]services.RepositoryDetails, remoteRepositories []interface{}) (runner *precheckrunner.PreCheckRunner) { + runner = precheckrunner.NewPreChecksRunner() // Add pre-checks here - runner.AddCheck(commandsUtils.NewRemoteRepositoryCheck(&tcc.TargetArtifactoryManager, remoteRepositories)) + runner.AddCheck(precheckrunner.NewRepositoryNamingCheck(selectedRepos)) + runner.AddCheck(precheckrunner.NewRemoteRepositoryCheck(&tcc.TargetArtifactoryManager, remoteRepositories)) return } -func (tcc *TransferConfigCommand) getEncryptedItems(selectedSourceRepos map[utils.RepoType][]string) (configXml string, remoteRepositories []interface{}, err error) { +func (tcc *TransferConfigCommand) getEncryptedItems(selectedSourceRepos map[utils.RepoType][]services.RepositoryDetails) (configXml string, remoteRepositories []interface{}, err error) { reactivateKeyEncryption, err := tcc.DeactivateKeyEncryption() if err != nil { return "", nil, err @@ -324,10 +326,10 @@ func (tcc *TransferConfigCommand) getEncryptedItems(selectedSourceRepos map[util } // Get all remote repositories from the source Artifactory server. 
- if remoteRepositoryNames, ok := selectedSourceRepos[utils.Remote]; ok && len(remoteRepositoryNames) > 0 { - remoteRepositories = make([]interface{}, len(remoteRepositoryNames)) - for i, repoName := range remoteRepositoryNames { - if err = tcc.SourceArtifactoryManager.GetRepository(repoName, &remoteRepositories[i]); err != nil { + if remoteRepositoriesDetails, ok := selectedSourceRepos[utils.Remote]; ok && len(remoteRepositoriesDetails) > 0 { + remoteRepositories = make([]interface{}, len(remoteRepositoriesDetails)) + for i, remoteRepositoryDetails := range remoteRepositoriesDetails { + if err = tcc.SourceArtifactoryManager.GetRepository(remoteRepositoryDetails.Key, &remoteRepositories[i]); err != nil { return } } diff --git a/artifactory/commands/transferconfig/transferconfig_test.go b/artifactory/commands/transferconfig/transferconfig_test.go index 443183196..6c229f860 100644 --- a/artifactory/commands/transferconfig/transferconfig_test.go +++ b/artifactory/commands/transferconfig/transferconfig_test.go @@ -95,7 +95,7 @@ func TestGetConfigXml(t *testing.T) { // Test get config xml transferConfigCmd := createTransferConfigCommand(t, serverDetails, nil) - configXml, _, err := transferConfigCmd.getEncryptedItems(make(map[utils.RepoType][]string)) + configXml, _, err := transferConfigCmd.getEncryptedItems(make(map[utils.RepoType][]services.RepositoryDetails)) assert.NoError(t, err) assert.Equal(t, "", configXml) } diff --git a/artifactory/commands/transferconfigmerge/transferconfigmerge.go b/artifactory/commands/transferconfigmerge/transferconfigmerge.go index fa3171c8c..ea1c0d273 100644 --- a/artifactory/commands/transferconfigmerge/transferconfigmerge.go +++ b/artifactory/commands/transferconfigmerge/transferconfigmerge.go @@ -53,7 +53,7 @@ func (tcmc *TransferConfigMergeCommand) SetExcludeProjectsPatterns(excludeProjec type mergeEntities struct { projectsToTransfer []accessServices.Project - reposToTransfer map[utils.RepoType][]string + reposToTransfer 
map[utils.RepoType][]services.RepositoryDetails } type Conflict struct { @@ -243,8 +243,8 @@ func compareProjects(sourceProject, targetProject accessServices.Project) (*Conf }, nil } -func (tcmc *TransferConfigMergeCommand) mergeRepositories(conflicts *[]Conflict) (reposToTransfer map[utils.RepoType][]string, err error) { - reposToTransfer = make(map[utils.RepoType][]string) +func (tcmc *TransferConfigMergeCommand) mergeRepositories(conflicts *[]Conflict) (reposToTransfer map[utils.RepoType][]services.RepositoryDetails, err error) { + reposToTransfer = make(map[utils.RepoType][]services.RepositoryDetails) sourceRepos, err := tcmc.SourceArtifactoryManager.GetAllRepositories() if err != nil { return @@ -286,7 +286,7 @@ func (tcmc *TransferConfigMergeCommand) mergeRepositories(conflicts *[]Conflict) } } else { repoType := utils.RepoTypeFromString(sourceRepo.Type) - reposToTransfer[repoType] = append(reposToTransfer[repoType], sourceRepo.Key) + reposToTransfer[repoType] = append(reposToTransfer[repoType], sourceRepo) } } return @@ -351,7 +351,7 @@ func (tcmc *TransferConfigMergeCommand) transferProjectsToTarget(reposToTransfer return } -func (tcmc *TransferConfigMergeCommand) decryptAndGetAllRemoteRepositories(remoteRepositoryNames []string) (remoteRepositories []interface{}, err error) { +func (tcmc *TransferConfigMergeCommand) decryptAndGetAllRemoteRepositories(remoteRepositoriesDetails []services.RepositoryDetails) (remoteRepositories []interface{}, err error) { // Decrypt source Artifactory to get remote repositories with raw text passwords reactivateKeyEncryption, err := tcmc.DeactivateKeyEncryption() if err != nil { @@ -362,7 +362,11 @@ func (tcmc *TransferConfigMergeCommand) decryptAndGetAllRemoteRepositories(remot err = reactivationErr } }() - return tcmc.GetAllRemoteRepositories(remoteRepositoryNames) + var remoteRepositoryKeys []string + for _, remoteRepositoryDetails := range remoteRepositoriesDetails { + remoteRepositoryKeys = append(remoteRepositoryKeys, 
remoteRepositoryDetails.Key) + } + return tcmc.GetAllRemoteRepositories(remoteRepositoryKeys) } type projectsMapper struct { diff --git a/artifactory/commands/transferfiles/delayedartifactshandler.go b/artifactory/commands/transferfiles/delayedartifactshandler.go index 8e04b740a..a5a20cfc9 100644 --- a/artifactory/commands/transferfiles/delayedartifactshandler.go +++ b/artifactory/commands/transferfiles/delayedartifactshandler.go @@ -89,19 +89,25 @@ type DelayedArtifactsFile struct { } // Collect all the delayed artifact files that were created up to this point for the repository and transfer their artifacts using handleDelayedArtifactsFiles -func consumeAllDelayFiles(base phaseBase) error { +func consumeAllDelayFiles(base phaseBase) (err error) { filesToConsume, err := getDelayFiles([]string{base.repoKey}) - if err != nil { - return err + if err != nil || len(filesToConsume) == 0 { + return } delayFunctions := getDelayUploadComparisonFunctions(base.repoSummary.PackageType) - if len(filesToConsume) > 0 && len(delayFunctions) > 0 { - log.Info("Starting to handle delayed artifacts uploads...") - if err = handleDelayedArtifactsFiles(filesToConsume, base, delayFunctions[1:]); err == nil { - log.Info("Done handling delayed artifacts uploads.") - } + if len(delayFunctions) == 0 { + return + } + + log.Info("Starting to handle delayed artifacts uploads...") + // Each delay function causes the transfer to skip a specific group of files. + // Within the handleDelayedArtifactsFiles function, we recursively remove the first delay function from the slice to transfer the first set of files every time. + if err = handleDelayedArtifactsFiles(filesToConsume, base, delayFunctions[1:]); err != nil { + return } - return err + + log.Info("Done handling delayed artifacts uploads.") + return deleteAllFiles(filesToConsume) } // Call consumeAllDelayFiles only if there are no failed transferred files for the repository up to this point. 
@@ -182,13 +188,6 @@ func consumeDelayedArtifactsFiles(pcWrapper *producerConsumerWrapper, filesToCon if err = base.stateManager.ChangeDelayedFilesCountBy(uint64(len(delayedArtifactsFile.DelayedArtifacts)), false); err != nil { log.Warn("Couldn't decrease the delayed files counter", err.Error()) } - - // Remove the file, so it won't be consumed again. - if err = os.Remove(filePath); err != nil { - return errorutils.CheckError(err) - } - - log.Debug("Done handling delayed artifacts file: '" + filePath + "'. Deleting it...") } return nil } diff --git a/artifactory/commands/transferfiles/fileserror.go b/artifactory/commands/transferfiles/fileserror.go index 0a74b5e75..47f0b867a 100644 --- a/artifactory/commands/transferfiles/fileserror.go +++ b/artifactory/commands/transferfiles/fileserror.go @@ -3,13 +3,11 @@ package transferfiles import ( "errors" "fmt" - "os" "strings" "time" "github.com/jfrog/gofrog/parallel" "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/api" - "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/log" ) @@ -33,7 +31,8 @@ func (e *errorsRetryPhase) run() error { // Does so by creating and uploading by chunks, and polling on status. // Consumed errors files are deleted, new failures are written to new files. 
func (e *errorsRetryPhase) handlePreviousUploadFailures() error { - if len(e.errorsFilesToHandle) == 0 { + errorsFilesToHandle := e.errorsFilesToHandle + if len(errorsFilesToHandle) == 0 { return nil } log.Info("Starting to handle previous upload failures...") @@ -46,11 +45,13 @@ func (e *errorsRetryPhase) handlePreviousUploadFailures() error { delayAction := func(phase phaseBase, addedDelayFiles []string) error { return consumeAllDelayFiles(phase) } - err := e.transferManager.doTransferWithProducerConsumer(action, delayAction) - if err == nil { - log.Info("Done handling previous upload failures.") + + if err := e.transferManager.doTransferWithProducerConsumer(action, delayAction); err != nil { + return err } - return err + + log.Info("Done handling previous upload failures.") + return deleteAllFiles(errorsFilesToHandle) } func convertUploadStatusToFileRepresentation(statuses []ExtendedFileUploadStatusResponse) (files []api.FileRepresentation) { @@ -83,24 +84,13 @@ func (e *errorsRetryPhase) handleErrorsFile(errFilePath string, pcWrapper *produ } // Upload - shouldStop, err := uploadByChunks(convertUploadStatusToFileRepresentation(failedFiles.Errors), uploadChunkChan, e.phaseBase, delayHelper, errorsChannelMng, pcWrapper) - if err != nil || shouldStop { - return err - } - - // Remove the file, so it won't be consumed again. - err = os.Remove(errFilePath) - if err != nil { - return errorutils.CheckError(err) - } - - log.Debug("Done handling errors file: '", errFilePath, "'. 
Deleting it...") - return nil + _, err = uploadByChunks(convertUploadStatusToFileRepresentation(failedFiles.Errors), uploadChunkChan, e.phaseBase, delayHelper, errorsChannelMng, pcWrapper) + return err } func (e *errorsRetryPhase) createErrorFilesHandleFunc(pcWrapper *producerConsumerWrapper, uploadChunkChan chan UploadedChunk, delayHelper delayUploadHelper, errorsChannelMng *ErrorsChannelMng) errorFileHandlerFunc { return func() parallel.TaskFunc { - return func(threadId int) error { + return func(int) error { var errList []string var err error for _, errFile := range e.errorsFilesToHandle { diff --git a/artifactory/commands/transferfiles/fulltransfer.go b/artifactory/commands/transferfiles/fulltransfer.go index 1f16ddb4a..eb7247bd1 100644 --- a/artifactory/commands/transferfiles/fulltransfer.go +++ b/artifactory/commands/transferfiles/fulltransfer.go @@ -96,8 +96,15 @@ func (m *fullTransferPhase) run() error { if ShouldStop(&m.phaseBase, &delayHelper, errorsChannelMng) { return nil } - folderHandler := m.createFolderFullTransferHandlerFunc(*pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) - _, err := pcWrapper.chunkBuilderProducerConsumer.AddTaskWithError(folderHandler(folderParams{relativePath: "."}), pcWrapper.errorsQueue.AddError) + + // Get the directory's node from the snapshot manager, and use information from previous transfer attempts if such exists. 
+ node, done, err := m.getAndHandleDirectoryNode(".") + if err != nil || done { + return err + } + + folderHandler := m.createFolderFullTransferHandlerFunc(node, *pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) + _, err = pcWrapper.chunkBuilderProducerConsumer.AddTaskWithError(folderHandler(folderParams{relativePath: "."}), pcWrapper.errorsQueue.AddError) return err } delayAction := func(phase phaseBase, addedDelayFiles []string) error { @@ -117,17 +124,17 @@ type folderParams struct { relativePath string } -func (m *fullTransferPhase) createFolderFullTransferHandlerFunc(pcWrapper producerConsumerWrapper, uploadChunkChan chan UploadedChunk, +func (m *fullTransferPhase) createFolderFullTransferHandlerFunc(node *reposnapshot.Node, pcWrapper producerConsumerWrapper, uploadChunkChan chan UploadedChunk, delayHelper delayUploadHelper, errorsChannelMng *ErrorsChannelMng) folderFullTransferHandlerFunc { return func(params folderParams) parallel.TaskFunc { return func(threadId int) error { logMsgPrefix := clientUtils.GetLogMsgPrefix(threadId, false) - return m.transferFolder(params, logMsgPrefix, pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) + return m.transferFolder(node, params, logMsgPrefix, pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) } } } -func (m *fullTransferPhase) transferFolder(params folderParams, logMsgPrefix string, pcWrapper producerConsumerWrapper, +func (m *fullTransferPhase) transferFolder(node *reposnapshot.Node, params folderParams, logMsgPrefix string, pcWrapper producerConsumerWrapper, uploadChunkChan chan UploadedChunk, delayHelper delayUploadHelper, errorsChannelMng *ErrorsChannelMng) (err error) { log.Debug(logMsgPrefix+"Handling folder:", path.Join(m.repoKey, params.relativePath)) @@ -139,12 +146,6 @@ func (m *fullTransferPhase) transferFolder(params folderParams, logMsgPrefix str return } - // Get the directory's node from the snapshot manager, and use information from previous transfer attempts if such exist. 
- node, done, err := m.getAndHandleDirectoryNode(params, logMsgPrefix) - if err != nil || done { - return err - } - curUploadChunk, err := m.searchAndHandleFolderContents(params, pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng, node) if err != nil { @@ -227,7 +228,13 @@ func (m *fullTransferPhase) handleFoundChildFolder(params folderParams, pcWrappe item servicesUtils.ResultItem) (err error) { newRelativePath := getFolderRelativePath(item.Name, params.relativePath) - folderHandler := m.createFolderFullTransferHandlerFunc(pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) + // Get the directory's node from the snapshot manager, and use information from previous transfer attempts if such exists. + node, done, err := m.getAndHandleDirectoryNode(newRelativePath) + if err != nil || done { + return err + } + + folderHandler := m.createFolderFullTransferHandlerFunc(node, pcWrapper, uploadChunkChan, delayHelper, errorsChannelMng) _, err = pcWrapper.chunkBuilderProducerConsumer.AddTaskWithError(folderHandler(folderParams{relativePath: newRelativePath}), pcWrapper.errorsQueue.AddError) return } @@ -242,7 +249,7 @@ func (m *fullTransferPhase) handleFoundFile(pcWrapper producerConsumerWrapper, return } // Increment the files count in the directory's node in the snapshot manager, to track its progress. - err = node.IncrementFilesCount() + err = node.IncrementFilesCount(uint64(file.Size)) if err != nil { return } @@ -289,15 +296,14 @@ func generateFolderContentAqlQuery(repoKey, relativePath string, paginationOffse // node - A node in the repository snapshot tree, which represents the current directory. // completed - Whether handling the node directory was completed. If it wasn't fully transferred, we start exploring and transferring it from scratch. // previousChildren - If the directory requires exploring, previously known children will be added from this map in order to preserve their states and references. 
-func (m *fullTransferPhase) getAndHandleDirectoryNode(params folderParams, logMsgPrefix string) (node *reposnapshot.Node, completed bool, err error) { - node, err = m.stateManager.LookUpNode(params.relativePath) +func (m *fullTransferPhase) getAndHandleDirectoryNode(relativePath string) (node *reposnapshot.Node, completed bool, err error) { + node, err = m.stateManager.LookUpNode(relativePath) if err != nil { return } // If data was not loaded from snapshot, we know that the node is visited for the first time and was not explored. - loadedFromSnapshot, err := m.stateManager.WasSnapshotLoaded() - if err != nil || !loadedFromSnapshot { + if !m.stateManager.WasSnapshotLoaded() { return } @@ -306,7 +312,7 @@ func (m *fullTransferPhase) getAndHandleDirectoryNode(params folderParams, logMs return } if completed { - log.Debug(logMsgPrefix+"Skipping completed folder:", path.Join(m.repoKey, params.relativePath)) + log.Debug("Skipping completed folder:", path.Join(m.repoKey, relativePath)) return } // If the node was not completed, we will start exploring it from the beginning. 
diff --git a/artifactory/commands/transferfiles/longpropertycheck.go b/artifactory/commands/transferfiles/longpropertycheck.go index 6b067bf40..d3135b08c 100644 --- a/artifactory/commands/transferfiles/longpropertycheck.go +++ b/artifactory/commands/transferfiles/longpropertycheck.go @@ -9,6 +9,7 @@ import ( "github.com/jfrog/gofrog/parallel" "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/api" cmdutils "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils/precheckrunner" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-cli-core/v2/utils/progressbar" "github.com/jfrog/jfrog-client-go/artifactory" @@ -67,7 +68,7 @@ func (lpc *LongPropertyCheck) Name() string { return longPropertyCheckName } -func (lpc *LongPropertyCheck) ExecuteCheck(args cmdutils.RunArguments) (passed bool, err error) { +func (lpc *LongPropertyCheck) ExecuteCheck(args precheckrunner.RunArguments) (passed bool, err error) { // Init producer consumer lpc.producerConsumer = parallel.NewRunner(threadCount, maxThreadCapacity, false) lpc.filesChan = make(chan FileWithLongProperty, threadCount) @@ -111,7 +112,7 @@ func (lpc *LongPropertyCheck) ExecuteCheck(args cmdutils.RunArguments) (passed b // Search for long properties in the server and create a search task to find the files that contains them // Returns the number of long properties found -func (lpc *LongPropertyCheck) longPropertiesTaskProducer(progress *progressbar.TasksProgressBar, args cmdutils.RunArguments) int { +func (lpc *LongPropertyCheck) longPropertiesTaskProducer(progress *progressbar.TasksProgressBar, args precheckrunner.RunArguments) int { // Init serviceManager, err := createTransferServiceManager(args.Context, args.ServerDetails) if err != nil { @@ -173,7 +174,7 @@ func getSearchAllPropertiesQuery(pageNumber int) string { // Create a task that fetch from the server the files with the given property. 
// We keep only the files that are at the requested repos and pass them at the files channel -func createSearchPropertyTask(property Property, repos []string, args cmdutils.RunArguments, filesChan chan FileWithLongProperty, progress *progressbar.TasksProgressBar) parallel.TaskFunc { +func createSearchPropertyTask(property Property, repos []string, args precheckrunner.RunArguments, filesChan chan FileWithLongProperty, progress *progressbar.TasksProgressBar) parallel.TaskFunc { return func(threadId int) (err error) { serviceManager, err := createTransferServiceManager(args.Context, args.ServerDetails) if err != nil { diff --git a/artifactory/commands/transferfiles/longpropertycheck_test.go b/artifactory/commands/transferfiles/longpropertycheck_test.go index 9acb698b0..0519dbbca 100644 --- a/artifactory/commands/transferfiles/longpropertycheck_test.go +++ b/artifactory/commands/transferfiles/longpropertycheck_test.go @@ -2,21 +2,22 @@ package transferfiles import ( "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "github.com/jfrog/gofrog/parallel" "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/api" - "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils/precheckrunner" commonTests "github.com/jfrog/jfrog-cli-core/v2/common/tests" "github.com/jfrog/jfrog-cli-core/v2/utils/config" "github.com/jfrog/jfrog-client-go/artifactory" servicesUtils "github.com/jfrog/jfrog-client-go/artifactory/services/utils" clientutils "github.com/jfrog/jfrog-client-go/utils" "github.com/stretchr/testify/assert" - "io" - "net/http" - "net/http/httptest" - "strings" - "sync" - "testing" ) var ( @@ -82,7 +83,7 @@ func testGetLongProperties(t *testing.T, serverProperties, expectedLongPropertie longPropertyCheck := NewLongPropertyCheck([]string{}) longPropertyCheck.filesChan = make(chan FileWithLongProperty, threadCount) - count := 
longPropertyCheck.longPropertiesTaskProducer(nil, utils.RunArguments{Context: nil, ServerDetails: serverDetails}) + count := longPropertyCheck.longPropertiesTaskProducer(nil, precheckrunner.RunArguments{Context: nil, ServerDetails: serverDetails}) assert.Len(t, expectedLongProperties, count) } @@ -119,7 +120,7 @@ func testSearchPropertyInFilesTask(t *testing.T, prop Property, specificRepos [] wait.Done() }() - task := createSearchPropertyTask(prop, specificRepos, utils.RunArguments{Context: nil, ServerDetails: serverDetails}, filesChan, nil) + task := createSearchPropertyTask(prop, specificRepos, precheckrunner.RunArguments{Context: nil, ServerDetails: serverDetails}, filesChan, nil) assert.NoError(t, task(0)) close(filesChan) wait.Wait() @@ -170,7 +171,7 @@ func testSearchPropertiesInFiles(t *testing.T, properties []Property, specificRe waitCollection.Done() }() - longPropertyCheck.longPropertiesTaskProducer(nil, utils.RunArguments{Context: nil, ServerDetails: serverDetails}) + longPropertyCheck.longPropertiesTaskProducer(nil, precheckrunner.RunArguments{Context: nil, ServerDetails: serverDetails}) longPropertyCheck.producerConsumer.Done() longPropertyCheck.producerConsumer.Run() close(longPropertyCheck.filesChan) @@ -201,7 +202,7 @@ func testLongPropertyCheckWithStubServer(t *testing.T, serverProperties []Proper defer testServer.Close() longPropertyCheck := NewLongPropertyCheck(specificRepos) - passed, err := longPropertyCheck.ExecuteCheck(utils.RunArguments{Context: nil, ServerDetails: serverDetails}) + passed, err := longPropertyCheck.ExecuteCheck(precheckrunner.RunArguments{Context: nil, ServerDetails: serverDetails}) assert.NoError(t, err) assert.Equal(t, shouldPass, passed) } diff --git a/artifactory/commands/transferfiles/state/state_test.go b/artifactory/commands/transferfiles/state/state_test.go index 8ddc16f3d..4497d8ff6 100644 --- a/artifactory/commands/transferfiles/state/state_test.go +++ b/artifactory/commands/transferfiles/state/state_test.go @@ 
-152,7 +152,7 @@ func assertGetTransferStateAndSnapshot(t *testing.T, reset bool, expectedTransfe func getRootAndAddSnapshotData(t *testing.T, stateManager *TransferStateManager) (root *reposnapshot.Node) { root, err := stateManager.LookUpNode(".") assert.NoError(t, err) - assert.NoError(t, root.IncrementFilesCount()) + assert.NoError(t, root.IncrementFilesCount(10)) assert.NoError(t, root.AddChildNode("child", nil)) return } diff --git a/artifactory/commands/transferfiles/state/statemanager.go b/artifactory/commands/transferfiles/state/statemanager.go index 65a3467a2..303d5001f 100644 --- a/artifactory/commands/transferfiles/state/statemanager.go +++ b/artifactory/commands/transferfiles/state/statemanager.go @@ -10,6 +10,7 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/utils/lock" "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" + "github.com/jfrog/jfrog-client-go/utils/log" ) // The interval in which to save the state and run transfer files to the file system. 
@@ -67,6 +68,8 @@ func (ts *TransferStateManager) UnlockTransferStateManager() error { // buildInfoRepo - True if build info repository // reset - Delete the repository's previous transfer info func (ts *TransferStateManager) SetRepoState(repoKey string, totalSizeBytes, totalFiles int64, buildInfoRepo, reset bool) error { + var transferredFiles uint32 = 0 + var transferredSizeBytes uint64 = 0 err := ts.Action(func(*TransferState) error { transferState, repoTransferSnapshot, err := getTransferStateAndSnapshot(repoKey, reset) if err != nil { @@ -75,6 +78,17 @@ func (ts *TransferStateManager) SetRepoState(repoKey string, totalSizeBytes, tot transferState.CurrentRepo.Phase1Info.TotalSizeBytes = totalSizeBytes transferState.CurrentRepo.Phase1Info.TotalUnits = totalFiles + if repoTransferSnapshot != nil && repoTransferSnapshot.loadedFromSnapshot { + transferredFiles, transferredSizeBytes, err = repoTransferSnapshot.snapshotManager.CalculateTransferredFilesAndSize() + if err != nil { + return err + } + log.Info("Calculated transferred files from previous run:", transferredFiles) + log.Info("Calculated transferred bytes from previous run:", transferredSizeBytes) + transferState.CurrentRepo.Phase1Info.TransferredUnits = int64(transferredFiles) + transferState.CurrentRepo.Phase1Info.TransferredSizeBytes = int64(transferredSizeBytes) + } + ts.TransferState = transferState ts.repoTransferSnapshot = repoTransferSnapshot return nil @@ -87,8 +101,8 @@ func (ts *TransferStateManager) SetRepoState(repoKey string, totalSizeBytes, tot transferRunStatus.BuildInfoRepo = buildInfoRepo transferRunStatus.VisitedFolders = 0 - transferRunStatus.OverallTransfer.TransferredUnits += ts.CurrentRepo.Phase1Info.TransferredUnits - transferRunStatus.OverallTransfer.TransferredSizeBytes += ts.CurrentRepo.Phase1Info.TransferredSizeBytes + transferRunStatus.OverallTransfer.TransferredUnits += int64(transferredFiles) + transferRunStatus.OverallTransfer.TransferredSizeBytes += 
int64(transferredSizeBytes) return nil }) } diff --git a/artifactory/commands/transferfiles/state/transfersnapshot.go b/artifactory/commands/transferfiles/state/transfersnapshot.go index d51b551a1..cb1a66a85 100644 --- a/artifactory/commands/transferfiles/state/transfersnapshot.go +++ b/artifactory/commands/transferfiles/state/transfersnapshot.go @@ -61,12 +61,8 @@ func (ts *TransferStateManager) LookUpNode(relativePath string) (requestedNode * return } -func (ts *TransferStateManager) WasSnapshotLoaded() (wasLoaded bool, err error) { - err = ts.snapshotAction(func(rts *RepoTransferSnapshot) error { - wasLoaded = rts.loadedFromSnapshot - return nil - }) - return +func (ts *TransferStateManager) WasSnapshotLoaded() bool { + return ts.repoTransferSnapshot.loadedFromSnapshot } func (ts *TransferStateManager) GetDirectorySnapshotNodeWithLru(relativePath string) (node *reposnapshot.Node, err error) { diff --git a/artifactory/commands/transferfiles/transfer.go b/artifactory/commands/transferfiles/transfer.go index 740f036ae..5c50505d5 100644 --- a/artifactory/commands/transferfiles/transfer.go +++ b/artifactory/commands/transferfiles/transfer.go @@ -13,7 +13,7 @@ import ( "time" "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/state" - commandsUtils "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils/precheckrunner" "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" "github.com/jfrog/jfrog-cli-core/v2/utils/config" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" @@ -292,7 +292,7 @@ func (tdc *TransferFilesCommand) initStorageInfoManagers() error { } // Creates the Pre-checks runner for the data transfer command -func (tdc *TransferFilesCommand) NewTransferDataPreChecksRunner() (runner *commandsUtils.PreCheckRunner, err error) { +func (tdc *TransferFilesCommand) NewTransferDataPreChecksRunner() (runner *precheckrunner.PreCheckRunner, err error) { // Get relevant 
repos serviceManager, err := createTransferServiceManager(tdc.context, tdc.sourceServerDetails) if err != nil { @@ -307,7 +307,7 @@ func (tdc *TransferFilesCommand) NewTransferDataPreChecksRunner() (runner *comma return } - runner = commandsUtils.NewPreChecksRunner() + runner = precheckrunner.NewPreChecksRunner() // Add pre checks here runner.AddCheck(NewLongPropertyCheck(append(localRepos, federatedRepos...))) diff --git a/artifactory/commands/transferfiles/utils.go b/artifactory/commands/transferfiles/utils.go index 14ebd3380..e41f50d3a 100644 --- a/artifactory/commands/transferfiles/utils.go +++ b/artifactory/commands/transferfiles/utils.go @@ -735,3 +735,11 @@ func getUniqueErrorOrDelayFilePath(dirPath string, getFileNamePrefix func() stri } return } + +func deleteAllFiles(filesToDelete []string) (err error) { + for _, fileToDelete := range filesToDelete { + log.Debug("Deleting:", fileToDelete, "...") + err = errors.Join(err, errorutils.CheckError(os.Remove(fileToDelete))) + } + return +} diff --git a/artifactory/commands/utils/checkrunner.go b/artifactory/commands/utils/precheckrunner/checkrunner.go similarity index 99% rename from artifactory/commands/utils/checkrunner.go rename to artifactory/commands/utils/precheckrunner/checkrunner.go index 263a0a01a..eca8a5e2e 100644 --- a/artifactory/commands/utils/checkrunner.go +++ b/artifactory/commands/utils/precheckrunner/checkrunner.go @@ -1,4 +1,4 @@ -package utils +package precheckrunner import ( "context" diff --git a/artifactory/commands/utils/checkrunner_test.go b/artifactory/commands/utils/precheckrunner/checkrunner_test.go similarity index 98% rename from artifactory/commands/utils/checkrunner_test.go rename to artifactory/commands/utils/precheckrunner/checkrunner_test.go index 22533f1cf..476f3eea6 100644 --- a/artifactory/commands/utils/checkrunner_test.go +++ b/artifactory/commands/utils/precheckrunner/checkrunner_test.go @@ -1,4 +1,4 @@ -package utils +package precheckrunner import ( "context" diff --git 
a/artifactory/commands/utils/remoteurlchecker.go b/artifactory/commands/utils/precheckrunner/remoteurlchecker.go similarity index 94% rename from artifactory/commands/utils/remoteurlchecker.go rename to artifactory/commands/utils/precheckrunner/remoteurlchecker.go index f4f3da31f..173ad52f5 100644 --- a/artifactory/commands/utils/remoteurlchecker.go +++ b/artifactory/commands/utils/precheckrunner/remoteurlchecker.go @@ -1,4 +1,4 @@ -package utils +package precheckrunner import ( "encoding/json" @@ -6,6 +6,7 @@ import ( "net/http" "time" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-cli-core/v2/utils/progressbar" "github.com/jfrog/jfrog-client-go/artifactory" @@ -114,7 +115,7 @@ func (rrc *RemoteRepositoryCheck) doCheckRemoteRepositories(args RunArguments, r } // Create rtDetails - rtDetails, err := CreateArtifactoryClientDetails(*rrc.targetServicesManager) + rtDetails, err := utils.CreateArtifactoryClientDetails(*rrc.targetServicesManager) if err != nil { return nil, err } @@ -145,7 +146,7 @@ func (rrc *RemoteRepositoryCheck) startCheckRemoteRepositories(rtDetails *httput LogMsgPrefix: "[Config import]", ExecutionHandler: func() (shouldRetry bool, err error) { // Start the remote repositories check process - resp, responseBody, err := (*rrc.targetServicesManager).Client().SendPost(artifactoryUrl+PluginsExecuteRestApi+"remoteRepositoriesCheck", requestBody, rtDetails) + resp, responseBody, err := (*rrc.targetServicesManager).Client().SendPost(artifactoryUrl+utils.PluginsExecuteRestApi+"remoteRepositoriesCheck", requestBody, rtDetails) if err != nil { return false, err } @@ -190,7 +191,7 @@ func (rrc *RemoteRepositoryCheck) waitForRemoteReposCheckCompletion(rtDetails *h func (rrc *RemoteRepositoryCheck) createImportPollingAction(rtDetails *httputils.HttpClientDetails, artifactoryUrl string, progressBar *progressbar.TasksProgressBar) httputils.PollingAction { return func() 
(shouldStop bool, responseBody []byte, err error) { // Get config import status - resp, body, _, err := (*rrc.targetServicesManager).Client().SendGet(artifactoryUrl+PluginsExecuteRestApi+"remoteRepositoriesCheckStatus", true, rtDetails) + resp, body, _, err := (*rrc.targetServicesManager).Client().SendGet(artifactoryUrl+utils.PluginsExecuteRestApi+"remoteRepositoriesCheckStatus", true, rtDetails) if err != nil { return true, nil, err } @@ -227,7 +228,7 @@ func unmarshalRemoteUrlResponse(body []byte) (*remoteUrlResponse, error) { // Create csv summary of all the files with inaccessible remote repositories and log the result func handleFailureRun(inaccessibleRepositories []inaccessibleRepository) (err error) { // Create summary - csvPath, err := CreateCSVFile("inaccessible-repositories", inaccessibleRepositories, time.Now()) + csvPath, err := utils.CreateCSVFile("inaccessible-repositories", inaccessibleRepositories, time.Now()) if err != nil { log.Error("Couldn't create the inaccessible remote repository URLs CSV file", err) return diff --git a/artifactory/commands/utils/remoteurlchecker_test.go b/artifactory/commands/utils/precheckrunner/remoteurlchecker_test.go similarity index 98% rename from artifactory/commands/utils/remoteurlchecker_test.go rename to artifactory/commands/utils/precheckrunner/remoteurlchecker_test.go index b90a0d202..a2edde628 100644 --- a/artifactory/commands/utils/remoteurlchecker_test.go +++ b/artifactory/commands/utils/precheckrunner/remoteurlchecker_test.go @@ -1,4 +1,4 @@ -package utils +package precheckrunner import ( "encoding/json" diff --git a/artifactory/commands/utils/precheckrunner/repositorynamingchecker.go b/artifactory/commands/utils/precheckrunner/repositorynamingchecker.go new file mode 100644 index 000000000..42c9bb927 --- /dev/null +++ b/artifactory/commands/utils/precheckrunner/repositorynamingchecker.go @@ -0,0 +1,74 @@ +package precheckrunner + +import ( + "fmt" + "strings" + "time" + + commandUtils 
"github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/utils" + + "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" + "github.com/jfrog/jfrog-client-go/artifactory/services" + "github.com/jfrog/jfrog-client-go/utils/log" +) + +const ( + repositoryNamingCheckName = "Repositories naming" + illegalDockerRepositoryKeyReason = "Docker repository keys in JFrog Cloud are not allowed to include '.' or '_' characters." +) + +type illegalRepositoryKeys struct { + RepoKey string `json:"repo_key,omitempty"` + Reason string `json:"reason,omitempty"` +} + +// Run repository naming check before transferring configuration from one Artifactory to another +type RepositoryNamingCheck struct { + selectedRepos map[utils.RepoType][]services.RepositoryDetails +} + +func NewRepositoryNamingCheck(selectedRepos map[utils.RepoType][]services.RepositoryDetails) *RepositoryNamingCheck { + return &RepositoryNamingCheck{selectedRepos} +} + +func (drc *RepositoryNamingCheck) Name() string { + return repositoryNamingCheckName +} + +func (drc *RepositoryNamingCheck) ExecuteCheck(args RunArguments) (passed bool, err error) { + results := drc.getIllegalRepositoryKeys() + if len(results) == 0 { + return true, nil + } + + return false, handleFailuresInRepositoryKeysRun(results) +} + +func (drc *RepositoryNamingCheck) getIllegalRepositoryKeys() []illegalRepositoryKeys { + var results []illegalRepositoryKeys + for _, repositoriesOfType := range drc.selectedRepos { + for _, repository := range repositoriesOfType { + if strings.ToLower(repository.PackageType) == "docker" && strings.ContainsAny(repository.Key, "_.") { + log.Debug("Found Docker repository with illegal characters:", repository.Key) + results = append(results, illegalRepositoryKeys{ + RepoKey: repository.Key, + Reason: illegalDockerRepositoryKeyReason, + }) + } + } + } + return results +} + +// Create CSV summary of all the files with illegal repository keys and log the result +func 
handleFailuresInRepositoryKeysRun(illegalDockerRepositoryKeys []illegalRepositoryKeys) (err error) { + // Create summary + csvPath, err := commandUtils.CreateCSVFile("illegal-repository-keys", illegalDockerRepositoryKeys, time.Now()) + if err != nil { + log.Error("Couldn't create the illegal repository keys CSV file", err) + return + } + // Log result + log.Info(fmt.Sprintf("Found %d illegal repository keys. Check the summary CSV file in: %s", len(illegalDockerRepositoryKeys), csvPath)) + return +} diff --git a/artifactory/commands/utils/precheckrunner/repositorynamingchecker_test.go b/artifactory/commands/utils/precheckrunner/repositorynamingchecker_test.go new file mode 100644 index 000000000..a45394ecf --- /dev/null +++ b/artifactory/commands/utils/precheckrunner/repositorynamingchecker_test.go @@ -0,0 +1,26 @@ +package precheckrunner + +import ( + "testing" + + "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" + "github.com/jfrog/jfrog-client-go/artifactory/services" + "github.com/stretchr/testify/assert" +) + +func TestGetIllegalDockerRepositoryKeys(t *testing.T) { + repositoryNamingCheck := RepositoryNamingCheck{ + selectedRepos: map[utils.RepoType][]services.RepositoryDetails{utils.Local: { + {Key: "a.b-docker", PackageType: "docker"}, + {Key: "a.b-generic", PackageType: "generic"}, + {Key: "a_b-docker", PackageType: "docker"}, + {Key: "ab-docker", PackageType: "docker"}, + {Key: "ab-generic", PackageType: "generic"}, + }}, + } + actualIllegalRepositories := repositoryNamingCheck.getIllegalRepositoryKeys() + assert.ElementsMatch(t, []illegalRepositoryKeys{ + {RepoKey: "a.b-docker", Reason: illegalDockerRepositoryKeyReason}, + {RepoKey: "a_b-docker", Reason: illegalDockerRepositoryKeyReason}, + }, actualIllegalRepositories) +} diff --git a/artifactory/commands/utils/testdata/remoteurlchecker/nuget_repo.json b/artifactory/commands/utils/precheckrunner/testdata/remoteurlchecker/nuget_repo.json similarity index 100% rename from 
artifactory/commands/utils/testdata/remoteurlchecker/nuget_repo.json rename to artifactory/commands/utils/precheckrunner/testdata/remoteurlchecker/nuget_repo.json diff --git a/artifactory/commands/utils/transferconfigbase.go b/artifactory/commands/utils/transferconfigbase.go index 6f88adda6..7bef0a76e 100644 --- a/artifactory/commands/utils/transferconfigbase.go +++ b/artifactory/commands/utils/transferconfigbase.go @@ -12,6 +12,7 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-client-go/access" "github.com/jfrog/jfrog-client-go/artifactory" + "github.com/jfrog/jfrog-client-go/artifactory/services" clientUtils "github.com/jfrog/jfrog-client-go/utils" "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/io/httputils" @@ -105,13 +106,13 @@ func (tcb *TransferConfigBase) ValidateDifferentServers() error { } // Create a map between the repository types to the list of repositories to transfer. -func (tcb *TransferConfigBase) GetSelectedRepositories() (map[utils.RepoType][]string, error) { +func (tcb *TransferConfigBase) GetSelectedRepositories() (map[utils.RepoType][]services.RepositoryDetails, error) { allTargetRepos, err := tcb.getAllTargetRepositories() if err != nil { return nil, err } - result := make(map[utils.RepoType][]string, len(utils.RepoTypes)+1) + result := make(map[utils.RepoType][]services.RepositoryDetails, len(utils.RepoTypes)+1) sourceRepos, err := tcb.SourceArtifactoryManager.GetAllRepositories() if err != nil { return nil, err @@ -127,7 +128,7 @@ func (tcb *TransferConfigBase) GetSelectedRepositories() (map[utils.RepoType][]s continue } repoType := utils.RepoTypeFromString(sourceRepo.Type) - result[repoType] = append(result[repoType], sourceRepo.Key) + result[repoType] = append(result[repoType], sourceRepo) } } return result, nil @@ -148,7 +149,7 @@ func (tcb *TransferConfigBase) DeactivateKeyEncryption() (reactivateKeyEncryptio // Transfer all repositories to the target 
Artifactory server // reposToTransfer - Map between a repository type to the list of repository names // remoteRepositories - Remote repositories params, we get the remote repository params in an earlier stage after decryption -func (tcb *TransferConfigBase) TransferRepositoriesToTarget(reposToTransfer map[utils.RepoType][]string, remoteRepositories []interface{}) (err error) { +func (tcb *TransferConfigBase) TransferRepositoriesToTarget(reposToTransfer map[utils.RepoType][]services.RepositoryDetails, remoteRepositories []interface{}) (err error) { // Transfer remote repositories for i, remoteRepositoryName := range reposToTransfer[utils.Remote] { if err = tcb.createRepositoryAndAssignToProject(remoteRepositories[i], remoteRepositoryName); err != nil { @@ -187,10 +188,10 @@ func (tcb *TransferConfigBase) getAllTargetRepositories() (*datastructures.Set[s // Transfer local, federated, unknown, or virtual repositories // reposToTransfer - Repositories names to transfer // repoType - Repository type -func (tcb *TransferConfigBase) transferSpecificRepositoriesToTarget(reposToTransfer []string, repoType utils.RepoType) (err error) { - for _, repoKey := range reposToTransfer { +func (tcb *TransferConfigBase) transferSpecificRepositoriesToTarget(reposToTransfer []services.RepositoryDetails, repoType utils.RepoType) (err error) { + for _, repo := range reposToTransfer { var params interface{} - if err = tcb.SourceArtifactoryManager.GetRepository(repoKey, ¶ms); err != nil { + if err = tcb.SourceArtifactoryManager.GetRepository(repo.Key, ¶ms); err != nil { return } if repoType == utils.Federated { @@ -198,7 +199,7 @@ func (tcb *TransferConfigBase) transferSpecificRepositoriesToTarget(reposToTrans return } } - if err = tcb.createRepositoryAndAssignToProject(params, repoKey); err != nil { + if err = tcb.createRepositoryAndAssignToProject(params, repo); err != nil { return } } @@ -207,17 +208,17 @@ func (tcb *TransferConfigBase) transferSpecificRepositoriesToTarget(reposToTrans 
// Transfer virtual repositories // reposToTransfer - Repositories names to transfer -func (tcb *TransferConfigBase) transferVirtualRepositoriesToTarget(reposToTransfer []string) (err error) { +func (tcb *TransferConfigBase) transferVirtualRepositoriesToTarget(reposToTransfer []services.RepositoryDetails) (err error) { allReposParams := make(map[string]interface{}) var singleRepoParamsMap map[string]interface{} var singleRepoParams interface{} // Step 1 - Get and create all virtual repositories with the included repositories removed - for _, repoKey := range reposToTransfer { + for _, repoToTransfer := range reposToTransfer { // Get repository params - if err = tcb.SourceArtifactoryManager.GetRepository(repoKey, &singleRepoParams); err != nil { + if err = tcb.SourceArtifactoryManager.GetRepository(repoToTransfer.Key, &singleRepoParams); err != nil { return } - allReposParams[repoKey] = singleRepoParams + allReposParams[repoToTransfer.Key] = singleRepoParams singleRepoParamsMap, err = InterfaceToMap(singleRepoParams) if err != nil { return @@ -226,7 +227,7 @@ func (tcb *TransferConfigBase) transferVirtualRepositoriesToTarget(reposToTransf // Create virtual repository without included repositories repositories := singleRepoParamsMap["repositories"] delete(singleRepoParamsMap, "repositories") - if err = tcb.createRepositoryAndAssignToProject(singleRepoParamsMap, repoKey); err != nil { + if err = tcb.createRepositoryAndAssignToProject(singleRepoParamsMap, repoToTransfer); err != nil { return } @@ -326,9 +327,9 @@ func (tcb *TransferConfigBase) removeFederatedMembers(federatedRepoParams interf // Create a repository in the target server and assign the repository to the required project, if any. 
// repoParams - Repository parameters // repoKey - Repository key -func (tcb *TransferConfigBase) createRepositoryAndAssignToProject(repoParams interface{}, repoKey string) (err error) { +func (tcb *TransferConfigBase) createRepositoryAndAssignToProject(repoParams interface{}, repoDetails services.RepositoryDetails) (err error) { var projectKey string - if repoParams, projectKey, err = removeProjectKeyIfNeeded(repoParams, repoKey); err != nil { + if repoParams, projectKey, err = removeProjectKeyIfNeeded(repoParams, repoDetails.Key); err != nil { return } if projectKey != "" { @@ -336,13 +337,13 @@ func (tcb *TransferConfigBase) createRepositoryAndAssignToProject(repoParams int // This is why we make sure to detach it before actually creating the repository. // If the project isn't linked to the repository, an error might come up, but we ignore it because we can't // be certain whether the repository was actually assigned to the project or not. - _ = tcb.TargetAccessManager.UnassignRepoFromProject(repoKey) + _ = tcb.TargetAccessManager.UnassignRepoFromProject(repoDetails.Key) } - if err = tcb.TargetArtifactoryManager.CreateRepositoryWithParams(repoParams, repoKey); err != nil { + if err = tcb.TargetArtifactoryManager.CreateRepositoryWithParams(repoParams, repoDetails.Key); err != nil { return } if projectKey != "" { - return tcb.TargetAccessManager.AssignRepoToProject(repoKey, projectKey, true) + return tcb.TargetAccessManager.AssignRepoToProject(repoDetails.Key, projectKey, true) } return } diff --git a/artifactory/commands/utils/transferconfigbase_test.go b/artifactory/commands/utils/transferconfigbase_test.go index 1d0cdc7d0..ac091f28c 100644 --- a/artifactory/commands/utils/transferconfigbase_test.go +++ b/artifactory/commands/utils/transferconfigbase_test.go @@ -153,10 +153,10 @@ func TestGetSelectedRepositories(t *testing.T) { selectedRepos, err := transferConfigBase.GetSelectedRepositories() assert.NoError(t, err) assert.Len(t, selectedRepos, 4) - 
assert.Equal(t, []string{"generic-local"}, selectedRepos[utils.Local]) - assert.Equal(t, []string{"generic-remote"}, selectedRepos[utils.Remote]) - assert.Equal(t, []string{"generic-virtual"}, selectedRepos[utils.Virtual]) - assert.Equal(t, []string{"generic-federated"}, selectedRepos[utils.Federated]) + assert.Equal(t, []services.RepositoryDetails{{Key: "generic-local", Type: "local"}}, selectedRepos[utils.Local]) + assert.Equal(t, []services.RepositoryDetails{{Key: "generic-remote", Type: "remote"}}, selectedRepos[utils.Remote]) + assert.Equal(t, []services.RepositoryDetails{{Key: "generic-virtual", Type: "virtual"}}, selectedRepos[utils.Virtual]) + assert.Equal(t, []services.RepositoryDetails{{Key: "generic-federated", Type: "federated"}}, selectedRepos[utils.Federated]) } func TestTransferRepositoryToTarget(t *testing.T) { @@ -189,7 +189,8 @@ func TestTransferRepositoryToTarget(t *testing.T) { transferConfigBase := createTransferConfigBase(t, serverDetails, serverDetails) assert.False(t, transferConfigBase.FederatedMembersRemoved) - err := transferConfigBase.transferSpecificRepositoriesToTarget([]string{"federated-local", "federated-local-no-members"}, utils.Federated) + err := transferConfigBase.transferSpecificRepositoriesToTarget([]services.RepositoryDetails{ + {Key: "federated-local"}, {Key: "federated-local-no-members"}}, utils.Federated) assert.NoError(t, err) assert.True(t, transferConfigBase.FederatedMembersRemoved) } @@ -237,7 +238,7 @@ func TestTransferVirtualRepositoriesToTarget(t *testing.T) { defer testServer.Close() transferConfigBase := createTransferConfigBase(t, serverDetails, serverDetails) - assert.NoError(t, transferConfigBase.transferVirtualRepositoriesToTarget([]string{"a-virtual", "b-virtual"})) + assert.NoError(t, transferConfigBase.transferVirtualRepositoriesToTarget([]services.RepositoryDetails{{Key: "a-virtual"}, {Key: "b-virtual"}})) } func TestDeactivateKeyEncryption(t *testing.T) { @@ -343,7 +344,7 @@ func 
TestCreateRepositoryAndAssignToProject(t *testing.T) { repoParams := services.NewLocalRepositoryBaseParams() repoParams.Key = "local-repo" repoParams.ProjectKey = projectKey - err := transferConfigBase.createRepositoryAndAssignToProject(repoParams, repoParams.Key) + err := transferConfigBase.createRepositoryAndAssignToProject(repoParams, services.RepositoryDetails{Key: repoParams.Key}) assert.NoError(t, err) assert.True(t, projectUnassigned) assert.True(t, repositoryCreated) diff --git a/artifactory/utils/projectconfig.go b/artifactory/utils/projectconfig.go index 4ab09d4be..95072e5a3 100644 --- a/artifactory/utils/projectconfig.go +++ b/artifactory/utils/projectconfig.go @@ -2,16 +2,15 @@ package utils import ( "fmt" - "path/filepath" - "reflect" - - "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" - "github.com/jfrog/jfrog-cli-core/v2/utils/config" + "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" + xrayutils "github.com/jfrog/jfrog-cli-core/v2/xray/utils" "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" "github.com/jfrog/jfrog-client-go/utils/log" "github.com/spf13/viper" + "path/filepath" + "reflect" ) const ( @@ -38,6 +37,11 @@ const ( Terraform ) +// Associates a technology with another of a different type in the structure. +// Docker is not present, as there is no docker-config command and, consequently, no docker.yaml file we need to operate on. 
+var techType = map[coreutils.Technology]ProjectType{coreutils.Maven: Maven, coreutils.Gradle: Gradle, coreutils.Npm: Npm, coreutils.Yarn: Yarn, coreutils.Go: Go, coreutils.Pip: Pip, + coreutils.Pipenv: Pipenv, coreutils.Poetry: Poetry, coreutils.Nuget: Nuget, coreutils.Dotnet: Dotnet} + var ProjectTypes = []string{ "go", "pip", @@ -187,3 +191,27 @@ func ReadResolutionOnlyConfiguration(confFilePath string) (*RepositoryConfig, er } return GetRepoConfigByPrefix(confFilePath, ProjectConfigResolverPrefix, vConfig) } + +// Verifies the existence of depsRepo. If it doesn't exist, it searches for a configuration file based on the technology type. If found, it assigns depsRepo in the AuditParams. +func SetResolutionRepoIfExists(params xrayutils.AuditParams, tech coreutils.Technology) (err error) { + if params.DepsRepo() != "" { + return + } + configFilePath, exists, err := GetProjectConfFilePath(techType[tech]) + if err != nil { + err = fmt.Errorf("failed while searching for %s.yaml config file: %s", tech.String(), err.Error()) + return + } + if !exists { + log.Debug(fmt.Sprintf("No %s.yaml configuration file was found. 
Resolving dependencies from %s default registry", tech.String(), tech.String())) + return + } + + repoConfig, err := ReadResolutionOnlyConfiguration(configFilePath) + if err != nil { + err = fmt.Errorf("failed while reading %s.yaml config file: %s", tech.String(), err.Error()) + return + } + params.SetDepsRepo(repoConfig.targetRepo) + return +} diff --git a/go.mod b/go.mod index 7065e6f93..46fdf6d14 100644 --- a/go.mod +++ b/go.mod @@ -99,6 +99,6 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect ) -// replace github.com/jfrog/jfrog-client-go => github.com/jfrog/jfrog-client-go v1.28.1-0.20231003120621-90e9d7ea05e9 +replace github.com/jfrog/jfrog-client-go => github.com/jfrog/jfrog-client-go v1.28.1-0.20231101142932-422f20520a28 -// replace github.com/jfrog/build-info-go => github.com/jfrog/build-info-go v1.8.9-0.20231019085746-e1b192457664 +replace github.com/jfrog/build-info-go => github.com/jfrog/build-info-go v1.8.9-0.20231031143744-13f94ab07bbc diff --git a/go.sum b/go.sum index 369bfedd0..2dce7ebfc 100644 --- a/go.sum +++ b/go.sum @@ -196,14 +196,14 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jedib0t/go-pretty/v6 v6.4.8 h1:HiNzyMSEpsBaduKhmK+CwcpulEeBrTmxutz4oX/oWkg= github.com/jedib0t/go-pretty/v6 v6.4.8/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= -github.com/jfrog/build-info-go v1.9.14 h1:xVezJ16Vpm/boRBn3lI1THCQmkylm+6R4zYWxOQ0NSM= -github.com/jfrog/build-info-go v1.9.14/go.mod h1:ujJ8XQZMdT2tMkLSMJNyDd1pCY+duwHdjV+9or9FLIg= +github.com/jfrog/build-info-go v1.8.9-0.20231031143744-13f94ab07bbc h1:MFejgCB90z7nA/KP48lF1t04tYuXAAQc53cBaFd9zcw= +github.com/jfrog/build-info-go v1.8.9-0.20231031143744-13f94ab07bbc/go.mod h1:ujJ8XQZMdT2tMkLSMJNyDd1pCY+duwHdjV+9or9FLIg= github.com/jfrog/gofrog v1.3.1 h1:QqAwQXCVReT724uga1AYqG/ZyrNQ6f+iTxmzkb+YFQk= github.com/jfrog/gofrog v1.3.1/go.mod 
h1:IFMc+V/yf7rA5WZ74CSbXe+Lgf0iApEQLxRZVzKRUR0= github.com/jfrog/jfrog-apps-config v1.0.1 h1:mtv6k7g8A8BVhlHGlSveapqf4mJfonwvXYLipdsOFMY= github.com/jfrog/jfrog-apps-config v1.0.1/go.mod h1:8AIIr1oY9JuH5dylz2S6f8Ym2MaadPLR6noCBO4C22w= -github.com/jfrog/jfrog-client-go v1.34.3 h1:kDfw3FUQQvOsTKFqonIgLlziez6CSX80xCYZIH9YYcg= -github.com/jfrog/jfrog-client-go v1.34.3/go.mod h1:fuxhYzWEkA16+ZV5cP/BJUGjA3SXVKbBoDmb8ZS6J4g= +github.com/jfrog/jfrog-client-go v1.28.1-0.20231101142932-422f20520a28 h1:CeuORbXaa9E+jDTT/DX1Ozuo8HGzDO7B8PIs0O35MNo= +github.com/jfrog/jfrog-client-go v1.28.1-0.20231101142932-422f20520a28/go.mod h1:fuxhYzWEkA16+ZV5cP/BJUGjA3SXVKbBoDmb8ZS6J4g= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= diff --git a/utils/coreutils/techutils.go b/utils/coreutils/techutils.go index 3880da6ad..fa043f20e 100644 --- a/utils/coreutils/techutils.go +++ b/utils/coreutils/techutils.go @@ -2,11 +2,16 @@ package coreutils import ( "fmt" - "github.com/jfrog/jfrog-client-go/utils/errorutils" - "github.com/jfrog/jfrog-client-go/utils/log" "os" + "path/filepath" "strings" + "github.com/jfrog/gofrog/datastructures" + "github.com/jfrog/jfrog-client-go/artifactory/services/fspatterns" + "github.com/jfrog/jfrog-client-go/utils/errorutils" + "github.com/jfrog/jfrog-client-go/utils/log" + + "golang.org/x/exp/maps" "golang.org/x/text/cases" "golang.org/x/text/language" @@ -110,13 +115,15 @@ var technologiesData = map[Technology]TechData{ Poetry: { packageType: Pypi, indicators: []string{"pyproject.toml", "poetry.lock"}, + packageDescriptors: []string{"pyproject.toml"}, packageInstallationCommand: "add", packageVersionOperator: "==", applicabilityScannable: true, }, Nuget: { - indicators: []string{".sln", ".csproj"}, - 
formal: "NuGet", + indicators: []string{".sln", ".csproj"}, + packageDescriptors: []string{".sln", ".csproj"}, + formal: "NuGet", // .NET CLI is used for NuGet projects execCommand: "dotnet", packageInstallationCommand: "add", @@ -124,8 +131,9 @@ var technologiesData = map[Technology]TechData{ packageVersionOperator: " -v ", }, Dotnet: { - indicators: []string{".sln", ".csproj"}, - formal: ".NET", + indicators: []string{".sln", ".csproj"}, + packageDescriptors: []string{".sln", ".csproj"}, + formal: ".NET", }, } @@ -179,7 +187,11 @@ func DetectedTechnologiesList() (technologies []string) { if errorutils.CheckError(err) != nil { return } - detectedTechnologies, err := DetectTechnologies(wd, false, false) + return detectedTechnologiesListInPath(wd, false) +} + +func detectedTechnologiesListInPath(path string, recursive bool) (technologies []string) { + detectedTechnologies, err := DetectTechnologies(path, false, recursive) if err != nil { return } @@ -191,6 +203,198 @@ func DetectedTechnologiesList() (technologies []string) { return techStringsList } +// If recursive is true, the search will not be limited to files in the root path. +// If requestedTechs is empty, all technologies will be checked. +// If excludePathPattern is not empty, files/directories that match the wildcard pattern will be excluded from the search. 
+func DetectTechnologiesDescriptors(path string, recursive bool, requestedTechs []string, requestedDescriptors map[Technology][]string, excludePathPattern string) (technologiesDetected map[Technology]map[string][]string, err error) { + filesList, err := fspatterns.ListFiles(path, recursive, false, true, true, excludePathPattern) + if err != nil { + return + } + workingDirectoryToIndicators, excludedTechAtWorkingDir := mapFilesToRelevantWorkingDirectories(filesList, requestedDescriptors) + var strJson string + if strJson, err = GetJsonIndent(workingDirectoryToIndicators); err != nil { + return + } else if len(workingDirectoryToIndicators) > 0 { + log.Debug(fmt.Sprintf("mapped %d working directories with indicators/descriptors:\n%s", len(workingDirectoryToIndicators), strJson)) + } + technologiesDetected = mapWorkingDirectoriesToTechnologies(workingDirectoryToIndicators, excludedTechAtWorkingDir, ToTechnologies(requestedTechs), requestedDescriptors) + if len(technologiesDetected) > 0 { + log.Debug(fmt.Sprintf("Detected %d technologies at %s: %s.", len(technologiesDetected), path, maps.Keys(technologiesDetected))) + } + return +} + +// Map files to relevant working directories according to the technologies' indicators/descriptors and requested descriptors. +// files: The file paths to map. +// requestedDescriptors: Special requested descriptors (for example in Pip requirement.txt can have different path) for each technology. +// Returns: +// 1. workingDirectoryToIndicators: A map of working directories to the files that are relevant to the technologies. +// wd1: [wd1/indicator, wd1/descriptor] +// wd/wd2: [wd/wd2/indicator] +// 2. excludedTechAtWorkingDir: A map of working directories to the technologies that are excluded from the working directory. 
+// wd1: [tech1, tech2] +// wd/wd2: [tech1] +func mapFilesToRelevantWorkingDirectories(files []string, requestedDescriptors map[Technology][]string) (workingDirectoryToIndicators map[string][]string, excludedTechAtWorkingDir map[string][]Technology) { + workingDirectoryToIndicatorsSet := make(map[string]*datastructures.Set[string]) + excludedTechAtWorkingDir = make(map[string][]Technology) + for _, path := range files { + directory := filepath.Dir(path) + + for tech, techData := range technologiesData { + // Check if the working directory contains indicators/descriptors for the technology + relevant := isIndicator(path, techData) || isDescriptor(path, techData) || isRequestedDescriptor(path, requestedDescriptors[tech]) + if relevant { + if _, exist := workingDirectoryToIndicatorsSet[directory]; !exist { + workingDirectoryToIndicatorsSet[directory] = datastructures.MakeSet[string]() + } + workingDirectoryToIndicatorsSet[directory].Add(path) + } + // Check if the working directory contains a file/directory with a name that ends with an excluded suffix + if isExclude(path, techData) { + excludedTechAtWorkingDir[directory] = append(excludedTechAtWorkingDir[directory], tech) + } + } + } + workingDirectoryToIndicators = make(map[string][]string) + for wd, indicators := range workingDirectoryToIndicatorsSet { + workingDirectoryToIndicators[wd] = indicators.ToSlice() + } + return +} + +func isDescriptor(path string, techData TechData) bool { + for _, descriptor := range techData.packageDescriptors { + if strings.HasSuffix(path, descriptor) { + return true + } + } + return false +} + +func isRequestedDescriptor(path string, requestedDescriptors []string) bool { + for _, requestedDescriptor := range requestedDescriptors { + if strings.HasSuffix(path, requestedDescriptor) { + return true + } + } + return false +} + +func isIndicator(path string, techData TechData) bool { + for _, indicator := range techData.indicators { + if strings.HasSuffix(path, indicator) { + return true 
+ } + } + return false +} + +func isExclude(path string, techData TechData) bool { + for _, exclude := range techData.exclude { + if strings.HasSuffix(path, exclude) { + return true + } + } + return false +} + +// Map working directories to technologies according to the given workingDirectoryToIndicators map files. +// workingDirectoryToIndicators: A map of working directories to the files inside the directory that are relevant to the technologies. +// excludedTechAtWorkingDir: A map of working directories to the technologies that are excluded from the working directory. +// requestedTechs: The technologies to check, if empty all technologies will be checked. +// requestedDescriptors: Special requested descriptors (for example in Pip requirement.txt can have different path) for each technology to detect. +func mapWorkingDirectoriesToTechnologies(workingDirectoryToIndicators map[string][]string, excludedTechAtWorkingDir map[string][]Technology, requestedTechs []Technology, requestedDescriptors map[Technology][]string) (technologiesDetected map[Technology]map[string][]string) { + // Get the relevant technologies to check + technologies := requestedTechs + if len(technologies) == 0 { + technologies = GetAllTechnologiesList() + } + technologiesDetected = make(map[Technology]map[string][]string) + // Map working directories to technologies + for _, tech := range technologies { + techWorkingDirs := getTechInformationFromWorkingDir(tech, workingDirectoryToIndicators, excludedTechAtWorkingDir, requestedDescriptors) + if len(techWorkingDirs) > 0 { + // Found indicators of the technology, add to detected. + technologiesDetected[tech] = techWorkingDirs + } + } + for _, tech := range requestedTechs { + if _, exist := technologiesDetected[tech]; !exist { + // Requested (forced with flag) technology and not found any indicators/descriptors in detection, add as detected. 
+ log.Warn(fmt.Sprintf("Requested technology %s but not found any indicators/descriptors in detection.", tech)) + technologiesDetected[tech] = map[string][]string{} + } + } + return +} + +func getTechInformationFromWorkingDir(tech Technology, workingDirectoryToIndicators map[string][]string, excludedTechAtWorkingDir map[string][]Technology, requestedDescriptors map[Technology][]string) (techWorkingDirs map[string][]string) { + techWorkingDirs = make(map[string][]string) + for wd, indicators := range workingDirectoryToIndicators { + descriptorsAtWd := []string{} + foundIndicator := false + if isTechExcludedInWorkingDir(tech, wd, excludedTechAtWorkingDir) { + // Exclude this technology from this working directory + continue + } + // Check if the working directory contains indicators/descriptors for the technology + for _, path := range indicators { + if isDescriptor(path, technologiesData[tech]) || isRequestedDescriptor(path, requestedDescriptors[tech]) { + descriptorsAtWd = append(descriptorsAtWd, path) + } + if isIndicator(path, technologiesData[tech]) || isRequestedDescriptor(path, requestedDescriptors[tech]) { + foundIndicator = true + } + } + if foundIndicator { + // Found indicators of the technology in the current working directory, add to detected. + techWorkingDirs[wd] = descriptorsAtWd + } + } + // Don't allow working directory if sub directory already exists as key for the same technology + techWorkingDirs = cleanSubDirectories(techWorkingDirs) + return +} + +func isTechExcludedInWorkingDir(tech Technology, wd string, excludedTechAtWorkingDir map[string][]Technology) bool { + if excludedTechs, exist := excludedTechAtWorkingDir[wd]; exist { + for _, excludedTech := range excludedTechs { + if excludedTech == tech { + return true + } + } + } + return false +} + +// Remove sub directories keys from the given workingDirectoryToFiles map. 
+// Keys: [dir/dir, dir/directory] -> [dir/dir, dir/directory] +// Keys: [dir, directory] -> [dir, directory] +// Keys: [dir/dir2, dir/dir2/dir3, dir/dir2/dir3/dir4] -> [dir/dir2] +// Values of removed sub directories will be added to the root directory. +func cleanSubDirectories(workingDirectoryToFiles map[string][]string) (result map[string][]string) { + result = make(map[string][]string) + for wd, files := range workingDirectoryToFiles { + root := getExistingRootDir(wd, workingDirectoryToFiles) + result[root] = append(result[root], files...) + } + return +} + +// Get the root directory of the given path according to the given workingDirectoryToIndicators map. +func getExistingRootDir(path string, workingDirectoryToIndicators map[string][]string) (root string) { + root = path + for wd := range workingDirectoryToIndicators { + parentWd := filepath.Dir(wd) + parentRoot := filepath.Dir(root) + if parentRoot != parentWd && strings.HasPrefix(root, wd) { + root = wd + } + } + return +} + // DetectTechnologies tries to detect all technologies types according to the files in the given path. // 'isCiSetup' will limit the search of possible techs to Maven, Gradle, and npm. // 'recursive' will determine if the search will be limited to files in the root path or not. 
@@ -205,6 +409,7 @@ func DetectTechnologies(path string, isCiSetup, recursive bool) (map[Technology] if err != nil { return nil, err } + log.Info(fmt.Sprintf("Scanning %d file(s):%s", len(filesList), filesList)) detectedTechnologies := detectTechnologiesByFilePaths(filesList, isCiSetup) return detectedTechnologies, nil } diff --git a/utils/coreutils/techutils_test.go b/utils/coreutils/techutils_test.go index 943813177..7d170b9da 100644 --- a/utils/coreutils/techutils_test.go +++ b/utils/coreutils/techutils_test.go @@ -1,9 +1,12 @@ package coreutils import ( - "github.com/stretchr/testify/assert" + "path/filepath" "reflect" "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/exp/maps" ) func TestDetectTechnologiesByFilePaths(t *testing.T) { @@ -31,6 +34,437 @@ func TestDetectTechnologiesByFilePaths(t *testing.T) { } } +func TestMapFilesToRelevantWorkingDirectories(t *testing.T) { + noRequest := map[Technology][]string{} + noExclude := map[string][]Technology{} + + tests := []struct { + name string + paths []string + requestedDescriptors map[Technology][]string + expectedWorkingDir map[string][]string + expectedExcluded map[string][]Technology + }{ + { + name: "noTechTest", + paths: []string{"pomxml", filepath.Join("sub1", "file"), filepath.Join("sub", "sub", "file")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{}, + expectedExcluded: noExclude, + }, + { + name: "mavenTest", + paths: []string{"pom.xml", filepath.Join("sub1", "pom.xml"), filepath.Join("sub2", "pom.xml")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{ + ".": {"pom.xml"}, + "sub1": {filepath.Join("sub1", "pom.xml")}, + "sub2": {filepath.Join("sub2", "pom.xml")}, + }, + expectedExcluded: noExclude, + }, + { + name: "npmTest", + paths: []string{filepath.Join("dir", "package.json"), filepath.Join("dir", "package-lock.json"), filepath.Join("dir2", "npm-shrinkwrap.json")}, + requestedDescriptors: noRequest, + expectedWorkingDir: 
map[string][]string{ + "dir": {filepath.Join("dir", "package.json"), filepath.Join("dir", "package-lock.json")}, + "dir2": {filepath.Join("dir2", "npm-shrinkwrap.json")}, + }, + expectedExcluded: noExclude, + }, + { + name: "yarnTest", + paths: []string{filepath.Join("dir", "package.json"), filepath.Join("dir", ".yarn")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{"dir": {filepath.Join("dir", "package.json"), filepath.Join("dir", ".yarn")}}, + expectedExcluded: map[string][]Technology{"dir": {Npm}}, + }, + { + name: "golangTest", + paths: []string{filepath.Join("dir", "dir2", "go.mod")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{filepath.Join("dir", "dir2"): {filepath.Join("dir", "dir2", "go.mod")}}, + expectedExcluded: noExclude, + }, + { + name: "pipTest", + paths: []string{ + filepath.Join("users_dir", "test", "package", "setup.py"), + filepath.Join("users_dir", "test", "package", "blabla.txt"), + filepath.Join("users_dir", "test", "package2", "requirements.txt"), + }, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{ + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}}, + expectedExcluded: noExclude, + }, + { + name: "pipRequestedDescriptorTest", + paths: []string{filepath.Join("dir", "blabla.txt"), filepath.Join("dir", "somefile")}, + requestedDescriptors: map[Technology][]string{Pip: {"blabla.txt"}}, + expectedWorkingDir: map[string][]string{"dir": {filepath.Join("dir", "blabla.txt")}}, + expectedExcluded: noExclude, + }, + { + name: "pipenvTest", + paths: []string{filepath.Join("users", "test", "package", "Pipfile")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", 
"Pipfile")}}, + expectedExcluded: map[string][]Technology{filepath.Join("users", "test", "package"): {Pip}}, + }, + { + name: "gradleTest", + paths: []string{filepath.Join("users", "test", "package", "build.gradle"), filepath.Join("dir", "build.gradle.kts"), filepath.Join("dir", "file")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{ + filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "build.gradle")}, + "dir": {filepath.Join("dir", "build.gradle.kts")}, + }, + expectedExcluded: noExclude, + }, + { + name: "nugetTest", + paths: []string{filepath.Join("dir", "project.sln"), filepath.Join("dir", "sub1", "project.csproj"), filepath.Join("dir", "file")}, + requestedDescriptors: noRequest, + expectedWorkingDir: map[string][]string{ + "dir": {filepath.Join("dir", "project.sln")}, + filepath.Join("dir", "sub1"): {filepath.Join("dir", "sub1", "project.csproj")}, + }, + expectedExcluded: noExclude, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + detectedWd, detectedExcluded := mapFilesToRelevantWorkingDirectories(test.paths, test.requestedDescriptors) + expectedKeys := maps.Keys(test.expectedWorkingDir) + actualKeys := maps.Keys(detectedWd) + assert.ElementsMatch(t, expectedKeys, actualKeys, "expected: %s, actual: %s", expectedKeys, actualKeys) + for key, value := range test.expectedWorkingDir { + assert.ElementsMatch(t, value, detectedWd[key], "expected: %s, actual: %s", value, detectedWd[key]) + } + assert.True(t, reflect.DeepEqual(test.expectedExcluded, detectedExcluded), "expected: %s, actual: %s", test.expectedExcluded, detectedExcluded) + }) + } +} + +func TestMapWorkingDirectoriesToTechnologies(t *testing.T) { + noRequestSpecialDescriptors := map[Technology][]string{} + noRequestTech := []Technology{} + tests := []struct { + name string + workingDirectoryToIndicators map[string][]string + excludedTechAtWorkingDir map[string][]Technology + requestedTechs []Technology 
+ requestedDescriptors map[Technology][]string + + expected map[Technology]map[string][]string + }{ + { + name: "noTechTest", + workingDirectoryToIndicators: map[string][]string{}, + excludedTechAtWorkingDir: map[string][]Technology{}, + requestedTechs: noRequestTech, + requestedDescriptors: noRequestSpecialDescriptors, + expected: map[Technology]map[string][]string{}, + }, + { + name: "all techs test", + workingDirectoryToIndicators: map[string][]string{ + "folder": {filepath.Join("folder", "pom.xml")}, + filepath.Join("folder", "sub1"): {filepath.Join("folder", "sub1", "pom.xml")}, + filepath.Join("folder", "sub2"): {filepath.Join("folder", "sub2", "pom.xml")}, + "dir": {filepath.Join("dir", "package.json"), filepath.Join("dir", "package-lock.json"), filepath.Join("dir", "build.gradle.kts"), filepath.Join("dir", "project.sln")}, + "directory": {filepath.Join("directory", "npm-shrinkwrap.json")}, + "dir3": {filepath.Join("dir3", "package.json"), filepath.Join("dir3", ".yarn")}, + filepath.Join("dir", "dir2"): {filepath.Join("dir", "dir2", "go.mod")}, + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}, + filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "Pipfile"), filepath.Join("users", "test", "package", "build.gradle")}, + filepath.Join("dir", "sub1"): {filepath.Join("dir", "sub1", "project.csproj")}, + }, + excludedTechAtWorkingDir: map[string][]Technology{ + filepath.Join("users", "test", "package"): {Pip}, + "dir3": {Npm}, + }, + requestedTechs: noRequestTech, + requestedDescriptors: noRequestSpecialDescriptors, + expected: map[Technology]map[string][]string{ + Maven: {"folder": {filepath.Join("folder", "pom.xml"), filepath.Join("folder", "sub1", "pom.xml"), filepath.Join("folder", "sub2", "pom.xml")}}, + Npm: { + "dir": {filepath.Join("dir", 
"package.json")}, + "directory": {}, + }, + Yarn: {"dir3": {filepath.Join("dir3", "package.json")}}, + Go: {filepath.Join("dir", "dir2"): {filepath.Join("dir", "dir2", "go.mod")}}, + Pip: { + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}, + }, + Pipenv: {filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "Pipfile")}}, + Gradle: { + "dir": {filepath.Join("dir", "build.gradle.kts")}, + filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "build.gradle")}, + }, + Nuget: {"dir": {filepath.Join("dir", "project.sln"), filepath.Join("dir", "sub1", "project.csproj")}}, + Dotnet: {"dir": {filepath.Join("dir", "project.sln"), filepath.Join("dir", "sub1", "project.csproj")}}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + detectedTech := mapWorkingDirectoriesToTechnologies(test.workingDirectoryToIndicators, test.excludedTechAtWorkingDir, test.requestedTechs, test.requestedDescriptors) + expectedKeys := maps.Keys(test.expected) + detectedKeys := maps.Keys(detectedTech) + assert.ElementsMatch(t, expectedKeys, detectedKeys, "expected: %s, actual: %s", expectedKeys, detectedKeys) + for key, value := range test.expected { + actualKeys := maps.Keys(detectedTech[key]) + expectedKeys := maps.Keys(value) + assert.ElementsMatch(t, expectedKeys, actualKeys, "for tech %s, expected: %s, actual: %s", key, expectedKeys, actualKeys) + for innerKey, innerValue := range value { + assert.ElementsMatch(t, innerValue, detectedTech[key][innerKey], "expected: %s, actual: %s", innerValue, detectedTech[key][innerKey]) + } + } + }) + } +} + +func TestGetExistingRootDir(t *testing.T) { + tests := []struct { + name string + path string + workingDirectoryToIndicators map[string][]string + expected string + }{ + { + 
name: "empty", + path: "", + workingDirectoryToIndicators: map[string][]string{}, + expected: "", + }, + { + name: "no match", + path: "dir", + workingDirectoryToIndicators: map[string][]string{ + filepath.Join("folder", "sub1"): {filepath.Join("folder", "sub1", "pom.xml")}, + "dir2": {filepath.Join("dir2", "go.mod")}, + "dir3": {}, + filepath.Join("directory", "dir2"): {filepath.Join("directory", "dir2", "go.mod")}, + }, + expected: "dir", + }, + { + name: "match root", + path: filepath.Join("directory", "dir2"), + workingDirectoryToIndicators: map[string][]string{ + filepath.Join("folder", "sub1"): {filepath.Join("folder", "sub1", "pom.xml")}, + "dir2": {filepath.Join("dir2", "go.mod")}, + "dir3": {}, + filepath.Join("directory", "dir2"): {filepath.Join("directory", "dir2", "go.mod")}, + }, + expected: filepath.Join("directory", "dir2"), + }, + { + name: "match sub", + path: filepath.Join("directory", "dir2"), + workingDirectoryToIndicators: map[string][]string{ + filepath.Join("folder", "sub1"): {filepath.Join("folder", "sub1", "pom.xml")}, + "dir2": {filepath.Join("dir2", "go.mod")}, + "directory": {}, + filepath.Join("directory", "dir2"): {filepath.Join("directory", "dir2", "go.mod")}, + }, + expected: "directory", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.expected, getExistingRootDir(test.path, test.workingDirectoryToIndicators)) + }) + } +} + +func TestCleanSubDirectories(t *testing.T) { + tests := []struct { + name string + workingDirectoryToFiles map[string][]string + expected map[string][]string + }{ + { + name: "empty", + workingDirectoryToFiles: map[string][]string{}, + expected: map[string][]string{}, + }, + { + name: "no sub directories", + workingDirectoryToFiles: map[string][]string{ + "directory": {filepath.Join("directory", "file")}, + filepath.Join("dir", "dir"): {filepath.Join("dir", "dir", "file")}, + filepath.Join("dir", "directory"): {filepath.Join("dir", "directory", "file")}, + }, 
+ expected: map[string][]string{ + "directory": {filepath.Join("directory", "file")}, + filepath.Join("dir", "dir"): {filepath.Join("dir", "dir", "file")}, + filepath.Join("dir", "directory"): {filepath.Join("dir", "directory", "file")}, + }, + }, + { + name: "sub directories", + workingDirectoryToFiles: map[string][]string{ + filepath.Join("dir", "dir"): {filepath.Join("dir", "dir", "file")}, + filepath.Join("dir", "directory"): {filepath.Join("dir", "directory", "file")}, + "dir": {filepath.Join("dir", "file")}, + "directory": {filepath.Join("directory", "file")}, + filepath.Join("dir", "dir2"): {filepath.Join("dir", "dir2", "file")}, + filepath.Join("dir", "dir2", "dir3"): {filepath.Join("dir", "dir2", "dir3", "file")}, + filepath.Join("dir", "dir2", "dir3", "dir4"): {filepath.Join("dir", "dir2", "dir3", "dir4", "file")}, + }, + expected: map[string][]string{ + "directory": {filepath.Join("directory", "file")}, + "dir": { + filepath.Join("dir", "file"), + filepath.Join("dir", "dir", "file"), + filepath.Join("dir", "directory", "file"), + filepath.Join("dir", "dir2", "file"), + filepath.Join("dir", "dir2", "dir3", "file"), + filepath.Join("dir", "dir2", "dir3", "dir4", "file"), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cleaned := cleanSubDirectories(test.workingDirectoryToFiles) + cleanedKeys := maps.Keys(cleaned) + expectedKeys := maps.Keys(test.expected) + assert.ElementsMatch(t, expectedKeys, cleanedKeys, "expected: %s, actual: %s", expectedKeys, cleanedKeys) + for key, value := range test.expected { + assert.ElementsMatch(t, value, cleaned[key], "expected: %s, actual: %s", value, cleaned[key]) + } + }) + } +} + +func TestGetTechInformationFromWorkingDir(t *testing.T) { + workingDirectoryToIndicators := map[string][]string{ + "folder": {filepath.Join("folder", "pom.xml")}, + filepath.Join("folder", "sub1"): {filepath.Join("folder", "sub1", "pom.xml")}, + filepath.Join("folder", "sub2"): {filepath.Join("folder", 
"sub2", "pom.xml")}, + "dir": {filepath.Join("dir", "package.json"), filepath.Join("dir", "package-lock.json"), filepath.Join("dir", "build.gradle.kts"), filepath.Join("dir", "project.sln"), filepath.Join("dir", "blabla.txt")}, + "directory": {filepath.Join("directory", "npm-shrinkwrap.json")}, + "dir3": {filepath.Join("dir3", "package.json"), filepath.Join("dir3", ".yarn")}, + filepath.Join("dir", "dir2"): {filepath.Join("dir", "dir2", "go.mod")}, + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}, + filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "Pipfile"), filepath.Join("users", "test", "package", "build.gradle")}, + filepath.Join("dir", "sub1"): {filepath.Join("dir", "sub1", "project.csproj")}, + } + excludedTechAtWorkingDir := map[string][]Technology{ + filepath.Join("users", "test", "package"): {Pip}, + "dir3": {Npm}, + } + + tests := []struct { + name string + tech Technology + requestedDescriptors map[Technology][]string + expected map[string][]string + }{ + { + name: "mavenTest", + tech: Maven, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{ + "folder": { + filepath.Join("folder", "pom.xml"), + filepath.Join("folder", "sub1", "pom.xml"), + filepath.Join("folder", "sub2", "pom.xml"), + }, + }, + }, + { + name: "npmTest", + tech: Npm, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{ + "dir": {filepath.Join("dir", "package.json")}, + "directory": {}, + }, + }, + { + name: "yarnTest", + tech: Yarn, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{"dir3": {filepath.Join("dir3", "package.json")}}, + }, + { + name: "golangTest", + tech: Go, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{filepath.Join("dir", 
"dir2"): {filepath.Join("dir", "dir2", "go.mod")}}, + }, + { + name: "pipTest", + tech: Pip, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{ + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}, + }, + }, + { + name: "pipRequestedDescriptorTest", + tech: Pip, + requestedDescriptors: map[Technology][]string{Pip: {"blabla.txt"}}, + expected: map[string][]string{ + "dir": {filepath.Join("dir", "blabla.txt")}, + filepath.Join("users_dir", "test", "package"): {filepath.Join("users_dir", "test", "package", "setup.py")}, + filepath.Join("users_dir", "test", "package2"): {filepath.Join("users_dir", "test", "package2", "requirements.txt")}, + }, + }, + { + name: "pipenvTest", + tech: Pipenv, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "Pipfile")}}, + }, + { + name: "gradleTest", + tech: Gradle, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{ + filepath.Join("users", "test", "package"): {filepath.Join("users", "test", "package", "build.gradle")}, + "dir": {filepath.Join("dir", "build.gradle.kts")}, + }, + }, + { + name: "nugetTest", + tech: Nuget, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{"dir": {filepath.Join("dir", "project.sln"), filepath.Join("dir", "sub1", "project.csproj")}}, + }, + { + name: "dotnetTest", + tech: Dotnet, + requestedDescriptors: map[Technology][]string{}, + expected: map[string][]string{"dir": {filepath.Join("dir", "project.sln"), filepath.Join("dir", "sub1", "project.csproj")}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + techInformation := getTechInformationFromWorkingDir(test.tech, 
workingDirectoryToIndicators, excludedTechAtWorkingDir, test.requestedDescriptors) + expectedKeys := maps.Keys(test.expected) + actualKeys := maps.Keys(techInformation) + assert.ElementsMatch(t, expectedKeys, actualKeys, "expected: %s, actual: %s", expectedKeys, actualKeys) + for key, value := range test.expected { + assert.ElementsMatch(t, value, techInformation[key], "expected: %s, actual: %s", value, techInformation[key]) + } + }) + } +} + func TestContainsApplicabilityScannableTech(t *testing.T) { tests := []struct { name string diff --git a/utils/coreutils/utils.go b/utils/coreutils/utils.go index 463a65d50..2269381d2 100644 --- a/utils/coreutils/utils.go +++ b/utils/coreutils/utils.go @@ -2,6 +2,7 @@ package coreutils import ( "bytes" + "encoding/json" "errors" "fmt" "io/fs" @@ -502,6 +503,16 @@ func parseYesNo(s string, def bool) (ans, valid bool) { return false, false } +func GetJsonIndent(o any) (strJson string, err error) { + byteJson, err := json.MarshalIndent(o, "", " ") + if err != nil { + err = errorutils.CheckError(err) + return + } + strJson = string(byteJson) + return +} + func GetCliUserAgent() string { if cliUserAgentVersion == "" { return cliUserAgentName diff --git a/utils/reposnapshot/node.go b/utils/reposnapshot/node.go index 33d862244..fb1056cf6 100644 --- a/utils/reposnapshot/node.go +++ b/utils/reposnapshot/node.go @@ -17,7 +17,9 @@ type Node struct { // Mutex is on the Node level to allow modifying non-conflicting content on multiple nodes simultaneously. mutex sync.Mutex // The files count is used to identify when handling a node is completed. It is only used during runtime, and is not persisted to disk for future runs. - filesCount uint32 + filesCount uint32 + totalFilesCount uint32 + totalFilesSize uint64 NodeStatus } @@ -34,9 +36,11 @@ const ( // The wrapper only contains fields that are used in future runs, hence not all fields from Node are persisted. 
// In addition, it does not hold the parent pointer to avoid cyclic reference on export. type NodeExportWrapper struct { - Name string `json:"name,omitempty"` - Children []*NodeExportWrapper `json:"children,omitempty"` - Completed bool `json:"completed,omitempty"` + Name string `json:"name,omitempty"` + Children []*NodeExportWrapper `json:"children,omitempty"` + Completed bool `json:"completed,omitempty"` + TotalFilesCount uint32 `json:"total_files_count,omitempty"` + TotalFilesSize uint64 `json:"total_files_size,omitempty"` } type ActionOnNodeFunc func(node *Node) error @@ -55,8 +59,10 @@ func (node *Node) convertToWrapper() (wrapper *NodeExportWrapper, err error) { var children []*Node err = node.action(func(node *Node) error { wrapper = &NodeExportWrapper{ - Name: node.name, - Completed: node.NodeStatus == Completed, + Name: node.name, + Completed: node.NodeStatus == Completed, + TotalFilesCount: node.totalFilesCount, + TotalFilesSize: node.totalFilesSize, } children = node.children return nil @@ -78,7 +84,9 @@ func (node *Node) convertToWrapper() (wrapper *NodeExportWrapper, err error) { // Convert the loaded node export wrapper to node. func (wrapper *NodeExportWrapper) convertToNode() *Node { node := &Node{ - name: wrapper.Name, + name: wrapper.Name, + totalFilesCount: wrapper.TotalFilesCount, + totalFilesSize: wrapper.TotalFilesSize, } // If node wasn't previously completed, we will start exploring it from scratch. 
if wrapper.Completed { @@ -128,6 +136,31 @@ func (node *Node) setCompleted() (err error) { return } +// Sum up all subtree directories with status "completed" +func (node *Node) CalculateTransferredFilesAndSize() (totalFilesCount uint32, totalFilesSize uint64, err error) { + var children []*Node + err = node.action(func(node *Node) error { + children = node.children + if node.NodeStatus == Completed { + totalFilesCount = node.totalFilesCount + totalFilesSize = node.totalFilesSize + } + return nil + }) + if err != nil { + return + } + for _, child := range children { + childFilesCount, childTotalFilesSize, childErr := child.CalculateTransferredFilesAndSize() + if childErr != nil { + return 0, 0, childErr + } + totalFilesCount += childFilesCount + totalFilesSize += childTotalFilesSize + } + return +} + // Check if node completed - if done exploring, done handling files, children are completed. func (node *Node) CheckCompleted() error { isCompleted := false @@ -135,11 +168,17 @@ func (node *Node) CheckCompleted() error { if node.NodeStatus == Exploring || node.filesCount > 0 { return nil } + var totalFilesCount uint32 = 0 + var totalFilesSize uint64 = 0 for _, child := range node.children { + totalFilesCount += child.totalFilesCount + totalFilesSize += child.totalFilesSize if child.NodeStatus < Completed { return nil } } + node.totalFilesCount += totalFilesCount + node.totalFilesSize += totalFilesSize isCompleted = true return nil }) @@ -150,18 +189,21 @@ func (node *Node) CheckCompleted() error { return node.setCompleted() } -func (node *Node) IncrementFilesCount() error { +func (node *Node) IncrementFilesCount(fileSize uint64) error { return node.action(func(node *Node) error { node.filesCount++ + node.totalFilesCount++ + node.totalFilesSize += fileSize return nil }) } func (node *Node) DecrementFilesCount() error { return node.action(func(node *Node) error { - if node.filesCount > 0 { - node.filesCount-- + if node.filesCount == 0 { + return 
errorutils.CheckErrorf("attempting to decrease file count in node '%s', but the files count is already 0", node.name) } + node.filesCount-- return nil }) } diff --git a/utils/reposnapshot/node_test.go b/utils/reposnapshot/node_test.go index 6c6e385c1..eb95aaaa9 100644 --- a/utils/reposnapshot/node_test.go +++ b/utils/reposnapshot/node_test.go @@ -1,8 +1,9 @@ package reposnapshot import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) // Convert node to wrapper and back to verify conversions. @@ -20,3 +21,81 @@ func TestConversions(t *testing.T) { assert.Equal(t, ".", node2.parent.name) assert.Equal(t, Completed, node2converted.NodeStatus) } + +func TestCheckCompleted(t *testing.T) { + zero, one, two := createThreeNodesTree(t) + + // Set completed and expect false + checkCompleted(t, false, zero, one, two) + + // Mark done exploring and zero all file counts + markDoneExploring(t, zero, one, two) + decrementFilesCount(t, one, two, two) + + // Run check completed one all nodes from down to top + checkCompleted(t, true, two, one, zero) +} + +func TestCalculateTransferredFilesAndSize(t *testing.T) { + zero, one, two := createThreeNodesTree(t) + + // Run calculate and expect that the total files count and size in "zero" are zero + totalFilesCount, totalFilesSize, err := zero.CalculateTransferredFilesAndSize() + assert.NoError(t, err) + assert.Zero(t, totalFilesSize) + assert.Zero(t, totalFilesCount) + + // Mark done exploring + markDoneExploring(t, zero, one, two) + + // Zero the files count of "two" + decrementFilesCount(t, two, two) + checkCompleted(t, true, two) + + // Run calculate and expect that "zero" will contain the files count and size of "two" + totalFilesCount, totalFilesSize, err = zero.CalculateTransferredFilesAndSize() + assert.NoError(t, err) + assert.EqualValues(t, 1, totalFilesSize) + assert.EqualValues(t, 2, totalFilesCount) + + // Zero the file count of "one" + decrementFilesCount(t, one) + 
checkCompleted(t, true, one, zero) + + // Run calculate and expect that "zero" will contain the files count and size of "one" and "two" + totalFilesCount, totalFilesSize, err = zero.CalculateTransferredFilesAndSize() + assert.NoError(t, err) + assert.EqualValues(t, 1, totalFilesSize) + assert.EqualValues(t, 3, totalFilesCount) +} + +// Create the following tree structure 0 --> 1 -- > 2 +func createThreeNodesTree(t *testing.T) (zero, one, two *Node) { + zero = createNodeBase(t, "0", 0, nil) + one = createNodeBase(t, "1", 1, zero) + two = createNodeBase(t, "2", 2, one) + addChildren(zero, one) + addChildren(one, two) + return +} + +func checkCompleted(t *testing.T, expected bool, nodes ...*Node) { + for _, node := range nodes { + assert.NoError(t, node.CheckCompleted()) + actual, err := node.IsCompleted() + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } +} + +func markDoneExploring(t *testing.T, nodes ...*Node) { + for _, node := range nodes { + assert.NoError(t, node.MarkDoneExploring()) + } +} + +func decrementFilesCount(t *testing.T, nodes ...*Node) { + for _, node := range nodes { + assert.NoError(t, node.DecrementFilesCount()) + } +} diff --git a/utils/reposnapshot/snapshotmanager.go b/utils/reposnapshot/snapshotmanager.go index 5d04569fd..2e92c2868 100644 --- a/utils/reposnapshot/snapshotmanager.go +++ b/utils/reposnapshot/snapshotmanager.go @@ -4,10 +4,11 @@ import ( "encoding/json" "errors" "fmt" + "strings" + "github.com/jfrog/gofrog/lru" "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" - "strings" ) // Represents a snapshot of a repository being traversed to do a certain action. 
@@ -82,6 +83,12 @@ func (sm *RepoSnapshotManager) PersistRepoSnapshot() error { return sm.root.convertAndSaveToFile(sm.snapshotFilePath) } +// Return the count and size of files that have been successfully transferred and their respective directories are marked as complete, +// ensuring they won't be transferred again. This data helps in estimating the remaining files for transfer after stopping. +func (sm *RepoSnapshotManager) CalculateTransferredFilesAndSize() (totalFilesCount uint32, totalFilesSize uint64, err error) { + return sm.root.CalculateTransferredFilesAndSize() +} + // Returns the node corresponding to the directory in the provided relative path. Path should be provided without the repository name. func (sm *RepoSnapshotManager) LookUpNode(relativePath string) (requestedNode *Node, err error) { if relativePath == "" { diff --git a/utils/reposnapshot/snapshotmanager_test.go b/utils/reposnapshot/snapshotmanager_test.go index 06681515a..4b372d640 100644 --- a/utils/reposnapshot/snapshotmanager_test.go +++ b/utils/reposnapshot/snapshotmanager_test.go @@ -1,37 +1,48 @@ package reposnapshot import ( - "github.com/jfrog/jfrog-client-go/utils/io/fileutils" - "github.com/stretchr/testify/assert" + "encoding/json" "os" "path" "path/filepath" "testing" + + clientutils "github.com/jfrog/jfrog-client-go/utils" + "github.com/jfrog/jfrog-client-go/utils/io/fileutils" + "github.com/stretchr/testify/assert" ) const dummyRepoKey = "dummy-repo-local" var expectedFile = filepath.Join("testdata", dummyRepoKey) -func TestLoad(t *testing.T) { - t.Run("repo snapshot doesn't exist", func(t *testing.T) { testLoad(t, "/path/to/file", false, CreateNewNode(".", nil)) }) - t.Run("repo snapshot exists", func(t *testing.T) { testLoad(t, expectedFile, true, createTestSnapshotTree(t)) }) +func TestLoadDoesNotExist(t *testing.T) { + _, exists, err := LoadRepoSnapshotManager(dummyRepoKey, "/path/to/file") + assert.NoError(t, err) + assert.False(t, exists) } -func testLoad(t *testing.T, 
snapshotPath string, expectedExists bool, expectedRoot *Node) { - sm, exists, err := LoadRepoSnapshotManager(dummyRepoKey, snapshotPath) +func TestLoad(t *testing.T) { + sm, exists, err := LoadRepoSnapshotManager(dummyRepoKey, expectedFile) assert.NoError(t, err) - assert.Equal(t, expectedExists, exists) - if expectedExists { - // Convert to wrapper in order to compare. - expectedWrapper, err := expectedRoot.convertToWrapper() - assert.NoError(t, err) - rootWrapper, err := sm.root.convertToWrapper() - assert.NoError(t, err) - assert.Equal(t, expectedWrapper, rootWrapper) - assert.Equal(t, snapshotPath, sm.snapshotFilePath) - assert.Equal(t, dummyRepoKey, sm.repoKey) - } + assert.True(t, exists) + // Convert to wrapper in order to compare + expectedRoot := createTestSnapshotTree(t) + expectedWrapper, err := expectedRoot.convertToWrapper() + assert.NoError(t, err) + rootWrapper, err := sm.root.convertToWrapper() + assert.NoError(t, err) + + // Marshal json to compare strings + expected, err := json.Marshal(expectedWrapper) + assert.NoError(t, err) + actual, err := json.Marshal(rootWrapper) + assert.NoError(t, err) + + // Compare + assert.Equal(t, clientutils.IndentJson(expected), clientutils.IndentJson(actual)) + assert.Equal(t, expectedFile, sm.snapshotFilePath) + assert.Equal(t, dummyRepoKey, sm.repoKey) } func TestSaveToFile(t *testing.T) { @@ -43,7 +54,7 @@ func TestSaveToFile(t *testing.T) { assert.NoError(t, err) actual, err := os.ReadFile(manager.snapshotFilePath) assert.NoError(t, err) - assert.Equal(t, expected, actual) + assert.Equal(t, clientutils.IndentJson(expected), clientutils.IndentJson(actual)) } func TestNodeCompletedAndTreeCollapsing(t *testing.T) { @@ -179,7 +190,7 @@ func createNodeBase(t *testing.T, name string, filesCount int, parent *Node) *No node := CreateNewNode(name, parent) node.NodeStatus = DoneExploring for i := 0; i < filesCount; i++ { - assert.NoError(t, node.IncrementFilesCount()) + assert.NoError(t, 
node.IncrementFilesCount(uint64(i))) } return node } diff --git a/utils/reposnapshot/testdata/dummy-repo-local b/utils/reposnapshot/testdata/dummy-repo-local index a5fc9e5b9..533170477 100644 --- a/utils/reposnapshot/testdata/dummy-repo-local +++ b/utils/reposnapshot/testdata/dummy-repo-local @@ -1 +1,34 @@ -{"name":".","children":[{"name":"0","children":[{"name":"a"}]},{"name":"1","children":[{"name":"a"},{"name":"b"}]},{"name":"2"}]} \ No newline at end of file +{ + "name": ".", + "children": [ + { + "name": "0", + "children": [ + { + "name": "a", + "total_files_count": 3, + "total_files_size": 3 + } + ] + }, + { + "name": "1", + "children": [ + { + "name": "a", + "total_files_count": 1 + }, + { + "name": "b", + "total_files_count": 2, + "total_files_size": 1 + } + ], + "total_files_count": 1 + }, + { + "name": "2" + } + ], + "total_files_count": 1 +} \ No newline at end of file diff --git a/xray/commands/audit/audit.go b/xray/commands/audit/audit.go index 1347e5357..ea9d68807 100644 --- a/xray/commands/audit/audit.go +++ b/xray/commands/audit/audit.go @@ -2,6 +2,8 @@ package audit import ( "errors" + "os" + rtutils "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-cli-core/v2/xray/scangraph" @@ -10,7 +12,6 @@ import ( "github.com/jfrog/jfrog-client-go/xray" "github.com/jfrog/jfrog-client-go/xray/services" "golang.org/x/sync/errgroup" - "os" xrayutils "github.com/jfrog/jfrog-cli-core/v2/xray/utils" ) @@ -92,7 +93,8 @@ func (auditCmd *AuditCommand) Run() (err error) { SetMinSeverityFilter(auditCmd.minSeverityFilter). SetFixableOnly(auditCmd.fixableOnly). SetGraphBasicParams(auditCmd.AuditBasicParams). - SetThirdPartyApplicabilityScan(auditCmd.thirdPartyApplicabilityScan) + SetThirdPartyApplicabilityScan(auditCmd.thirdPartyApplicabilityScan). 
+ SetExclusions(auditCmd.exclusions) auditResults, err := RunAudit(auditParams) if err != nil { return @@ -107,10 +109,10 @@ func (auditCmd *AuditCommand) Run() (err error) { messages = []string{coreutils.PrintTitle("The ‘jf audit’ command also supports JFrog Advanced Security features, such as 'Contextual Analysis', 'Secret Detection', 'IaC Scan' and ‘SAST’.\nThis feature isn't enabled on your system. Read more - ") + coreutils.PrintLink("https://jfrog.com/xray/")} } // Print Scan results on all cases except if errors accrued on SCA scan and no security/license issues found. - printScanResults := !(auditResults.ScaError != nil && xrayutils.IsEmptyScanResponse(auditResults.ExtendedScanResults.XrayResults)) + printScanResults := !(auditResults.ScaError != nil && !auditResults.IsScaIssuesFound()) if printScanResults { - if err = xrayutils.NewResultsWriter(auditResults.ExtendedScanResults). - SetIsMultipleRootProject(auditResults.IsMultipleRootProject). + if err = xrayutils.NewResultsWriter(auditResults). + SetIsMultipleRootProject(auditResults.IsMultipleProject()). SetIncludeVulnerabilities(auditCmd.IncludeVulnerabilities). SetIncludeLicenses(auditCmd.IncludeLicenses). SetOutputFormat(auditCmd.OutputFormat()). @@ -126,7 +128,7 @@ func (auditCmd *AuditCommand) Run() (err error) { } // Only in case Xray's context was given (!auditCmd.IncludeVulnerabilities), and the user asked to fail the build accordingly, do so. 
- if auditCmd.Fail && !auditCmd.IncludeVulnerabilities && xrayutils.CheckIfFailBuild(auditResults.ExtendedScanResults.XrayResults) { + if auditCmd.Fail && !auditCmd.IncludeVulnerabilities && xrayutils.CheckIfFailBuild(auditResults.GetScaScansXrayResults()) { err = xrayutils.NewFailBuildError() } return @@ -136,23 +138,12 @@ func (auditCmd *AuditCommand) CommandName() string { return "generic_audit" } -type Results struct { - IsMultipleRootProject bool - ScaError error - JasError error - ExtendedScanResults *xrayutils.ExtendedScanResults -} - -func NewAuditResults() *Results { - return &Results{ExtendedScanResults: &xrayutils.ExtendedScanResults{}} -} - // Runs an audit scan based on the provided auditParams. // Returns an audit Results object containing all the scan results. // If the current server is entitled for JAS, the advanced security results will be included in the scan results. -func RunAudit(auditParams *AuditParams) (results *Results, err error) { +func RunAudit(auditParams *AuditParams) (results *xrayutils.Results, err error) { // Initialize Results struct - results = NewAuditResults() + results = xrayutils.NewAuditResults() serverDetails, err := auditParams.ServerDetails() if err != nil { @@ -165,7 +156,7 @@ func RunAudit(auditParams *AuditParams) (results *Results, err error) { if err = clientutils.ValidateMinimumVersion(clientutils.Xray, auditParams.xrayVersion, scangraph.GraphScanMinXrayVersion); err != nil { return } - results.ExtendedScanResults.XrayVersion = auditParams.xrayVersion + results.XrayVersion = auditParams.xrayVersion results.ExtendedScanResults.EntitledForJas, err = isEntitledForJas(xrayManager, auditParams.xrayVersion) if err != nil { return @@ -178,7 +169,7 @@ func RunAudit(auditParams *AuditParams) (results *Results, err error) { } // The sca scan doesn't require the analyzer manager, so it can run separately from the analyzer manager download routine. 
- results.ScaError = runScaScan(auditParams, results) + results.ScaError = runScaScan(auditParams, results) // Wait for the Download of the AnalyzerManager to complete. if err = errGroup.Wait(); err != nil { @@ -187,7 +178,7 @@ func RunAudit(auditParams *AuditParams) (results *Results, err error) { // Run scanners only if the user is entitled for Advanced Security if results.ExtendedScanResults.EntitledForJas { - results.JasError = runJasScannersAndSetResults(results.ExtendedScanResults, auditParams.DirectDependencies(), serverDetails, auditParams.workingDirs, auditParams.Progress(), auditParams.xrayGraphScanParams.MultiScanId, auditParams.thirdPartyApplicabilityScan) + results.JasError = runJasScannersAndSetResults(results, auditParams.DirectDependencies(), serverDetails, auditParams.workingDirs, auditParams.Progress(), auditParams.xrayGraphScanParams.MultiScanId, auditParams.thirdPartyApplicabilityScan) } return } diff --git a/xray/commands/audit/auditparams.go b/xray/commands/audit/auditparams.go index 9dc42e3c2..b4130a9d4 100644 --- a/xray/commands/audit/auditparams.go +++ b/xray/commands/audit/auditparams.go @@ -8,6 +8,7 @@ import ( type AuditParams struct { xrayGraphScanParams *services.XrayGraphScanParams workingDirs []string + exclusions []string installFunc func(tech string) error fixableOnly bool minSeverityFilter string @@ -40,6 +41,15 @@ func (params *AuditParams) XrayVersion() string { return params.xrayVersion } +func (params *AuditParams) Exclusions() []string { + return params.exclusions +} + +func (params *AuditParams) SetExclusions(exclusions []string) *AuditParams { + params.exclusions = exclusions + return params +} + func (params *AuditParams) SetXrayGraphScanParams(xrayGraphScanParams *services.XrayGraphScanParams) *AuditParams { params.xrayGraphScanParams = xrayGraphScanParams return params } @@ -82,3 +92,8 @@ func (params *AuditParams) SetThirdPartyApplicabilityScan(includeThirdPartyDeps 
params.thirdPartyApplicabilityScan = includeThirdPartyDeps return params } + +func (params *AuditParams) SetDepsRepo(depsRepo string) *AuditParams { + params.AuditBasicParams.SetDepsRepo(depsRepo) + return params +} diff --git a/xray/commands/audit/jasrunner.go b/xray/commands/audit/jasrunner.go index c528fdadc..b9dc0c154 100644 --- a/xray/commands/audit/jasrunner.go +++ b/xray/commands/audit/jasrunner.go @@ -13,7 +13,7 @@ import ( "github.com/jfrog/jfrog-client-go/utils/log" ) -func runJasScannersAndSetResults(scanResults *utils.ExtendedScanResults, directDependencies []string, +func runJasScannersAndSetResults(scanResults *utils.Results, directDependencies []string, serverDetails *config.ServerDetails, workingDirs []string, progress io.ProgressMgr, multiScanId string, thirdPartyApplicabilityScan bool) (err error) { if serverDetails == nil || len(serverDetails.Url) == 0 { log.Warn("To include 'Advanced Security' scan as part of the audit output, please run the 'jf c add' command before running this command.") @@ -30,7 +30,7 @@ func runJasScannersAndSetResults(scanResults *utils.ExtendedScanResults, directD if progress != nil { progress.SetHeadlineMsg("Running applicability scanning") } - scanResults.ApplicabilityScanResults, err = applicability.RunApplicabilityScan(scanResults.XrayResults, directDependencies, scanResults.ScannedTechnologies, scanner, thirdPartyApplicabilityScan) + scanResults.ExtendedScanResults.ApplicabilityScanResults, err = applicability.RunApplicabilityScan(scanResults.GetScaScansXrayResults(), directDependencies, scanResults.GetScaScannedTechnologies(), scanner, thirdPartyApplicabilityScan) if err != nil { return } @@ -41,20 +41,20 @@ func runJasScannersAndSetResults(scanResults *utils.ExtendedScanResults, directD if progress != nil { progress.SetHeadlineMsg("Running secrets scanning") } - scanResults.SecretsScanResults, err = secrets.RunSecretsScan(scanner) + scanResults.ExtendedScanResults.SecretsScanResults, err = 
secrets.RunSecretsScan(scanner) if err != nil { return } if progress != nil { progress.SetHeadlineMsg("Running IaC scanning") } - scanResults.IacScanResults, err = iac.RunIacScan(scanner) + scanResults.ExtendedScanResults.IacScanResults, err = iac.RunIacScan(scanner) if err != nil { return } if progress != nil { progress.SetHeadlineMsg("Running SAST scanning") } - scanResults.SastScanResults, err = sast.RunSastScan(scanner) + scanResults.ExtendedScanResults.SastScanResults, err = sast.RunSastScan(scanner) return } diff --git a/xray/commands/audit/jasrunner_test.go b/xray/commands/audit/jasrunner_test.go index 0d92f1b18..2c6445b66 100644 --- a/xray/commands/audit/jasrunner_test.go +++ b/xray/commands/audit/jasrunner_test.go @@ -21,23 +21,23 @@ func TestGetExtendedScanResults_AnalyzerManagerDoesntExist(t *testing.T) { defer func() { assert.NoError(t, os.Unsetenv(coreutils.HomeDir)) }() - scanResults := &utils.ExtendedScanResults{XrayResults: jas.FakeBasicXrayResults, ScannedTechnologies: []coreutils.Technology{coreutils.Yarn}} + scanResults := &utils.Results{ScaResults: []utils.ScaScanResult{{Technology: coreutils.Yarn, XrayResults: jas.FakeBasicXrayResults}}, ExtendedScanResults: &utils.ExtendedScanResults{}} err = runJasScannersAndSetResults(scanResults, []string{"issueId_1_direct_dependency", "issueId_2_direct_dependency"}, &jas.FakeServerDetails, nil, nil, "", false) // Expect error: assert.Error(t, err) } func TestGetExtendedScanResults_ServerNotValid(t *testing.T) { - scanResults := &utils.ExtendedScanResults{XrayResults: jas.FakeBasicXrayResults, ScannedTechnologies: []coreutils.Technology{coreutils.Pip}} + scanResults := &utils.Results{ScaResults: []utils.ScaScanResult{{Technology: coreutils.Pip, XrayResults: jas.FakeBasicXrayResults}}, ExtendedScanResults: &utils.ExtendedScanResults{}} err := runJasScannersAndSetResults(scanResults, []string{"issueId_1_direct_dependency", "issueId_2_direct_dependency"}, nil, nil, nil, "", false) assert.NoError(t, err) } func 
TestGetExtendedScanResults_AnalyzerManagerReturnsError(t *testing.T) { - mockDirectDependencies := []string{"issueId_2_direct_dependency", "issueId_1_direct_dependency"} assert.NoError(t, rtutils.DownloadAnalyzerManagerIfNeeded()) - scanResults := &utils.ExtendedScanResults{XrayResults: jas.FakeBasicXrayResults, ScannedTechnologies: []coreutils.Technology{coreutils.Yarn}} - err := runJasScannersAndSetResults(scanResults, mockDirectDependencies, &jas.FakeServerDetails, nil, nil, "", false) + + scanResults := &utils.Results{ScaResults: []utils.ScaScanResult{{Technology: coreutils.Yarn, XrayResults: jas.FakeBasicXrayResults}}, ExtendedScanResults: &utils.ExtendedScanResults{}} + err := runJasScannersAndSetResults(scanResults, []string{"issueId_2_direct_dependency", "issueId_1_direct_dependency"}, &jas.FakeServerDetails, nil, nil, "", false) // Expect error: assert.ErrorContains(t, err, "failed to run Applicability scan") diff --git a/xray/commands/audit/sca/npm/npm.go b/xray/commands/audit/sca/npm/npm.go index 3574a3065..3e9520ebe 100644 --- a/xray/commands/audit/sca/npm/npm.go +++ b/xray/commands/audit/sca/npm/npm.go @@ -1,8 +1,11 @@ package npm import ( + "errors" + "fmt" biutils "github.com/jfrog/build-info-go/build/utils" buildinfo "github.com/jfrog/build-info-go/entities" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/npm" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-cli-core/v2/xray/commands/audit/sca" "github.com/jfrog/jfrog-cli-core/v2/xray/utils" @@ -31,6 +34,17 @@ func BuildDependencyTree(params utils.AuditParams) (dependencyTrees []*xrayUtils treeDepsParam := createTreeDepsParam(params) + restoreNpmrcFunc, err := configNpmResolutionServerIfNeeded(params) + if err != nil { + err = fmt.Errorf("failed while configuring a resolution server: %s", err.Error()) + return + } + defer func() { + if restoreNpmrcFunc != nil { + err = errors.Join(err, restoreNpmrcFunc()) + } + }() + // Calculate npm dependencies 
dependenciesMap, err := biutils.CalculateDependenciesMap(npmExecutablePath, currentDir, packageInfo.BuildInfoModuleId(), treeDepsParam, log.Logger) if err != nil { @@ -47,6 +61,32 @@ func BuildDependencyTree(params utils.AuditParams) (dependencyTrees []*xrayUtils return } +// Generates a .npmrc file to configure an Artifactory server as the resolver server. +func configNpmResolutionServerIfNeeded(params utils.AuditParams) (restoreNpmrcFunc func() error, err error) { + if params == nil { + err = fmt.Errorf("got empty params upon configuring resolution server") + return + } + serverDetails, err := params.ServerDetails() + if err != nil || serverDetails == nil { + return + } + depsRepo := params.DepsRepo() + if depsRepo == "" { + return + } + + npmCmd := npm.NewNpmCommand("install", false).SetServerDetails(serverDetails) + if err = npmCmd.PreparePrerequisites(depsRepo); err != nil { + return + } + if err = npmCmd.CreateTempNpmrc(); err != nil { + return + } + restoreNpmrcFunc = npmCmd.RestoreNpmrcFunc() + return +} + func createTreeDepsParam(params utils.AuditParams) biutils.NpmTreeDepListParam { if params == nil { return biutils.NpmTreeDepListParam{ @@ -54,7 +94,8 @@ func createTreeDepsParam(params utils.AuditParams) biutils.NpmTreeDepListParam { } } npmTreeDepParam := biutils.NpmTreeDepListParam{ - Args: addIgnoreScriptsFlag(params.Args()), + Args: addIgnoreScriptsFlag(params.Args()), + InstallCommandArgs: params.InstallCommandArgs(), } if npmParams, ok := params.(utils.AuditNpmParams); ok { npmTreeDepParam.IgnoreNodeModules = npmParams.NpmIgnoreNodeModules() diff --git a/xray/commands/audit/sca/npm/npm_test.go b/xray/commands/audit/sca/npm/npm_test.go index 36871d29a..ad1591238 100644 --- a/xray/commands/audit/sca/npm/npm_test.go +++ b/xray/commands/audit/sca/npm/npm_test.go @@ -116,6 +116,7 @@ func TestIgnoreScripts(t *testing.T) { // The package.json file contain a postinstall script running an "exit 1" command. 
// Without the "--ignore-scripts" flag, the test will fail. - _, _, err := BuildDependencyTree(nil) + params := &utils.AuditBasicParams{} + _, _, err := BuildDependencyTree(params) assert.NoError(t, err) } diff --git a/xray/commands/audit/scarunner.go b/xray/commands/audit/scarunner.go index ae4a25b1a..17eaa1d9b 100644 --- a/xray/commands/audit/scarunner.go +++ b/xray/commands/audit/scarunner.go @@ -4,8 +4,13 @@ import ( "encoding/json" "errors" "fmt" + "os" + "time" + "github.com/jfrog/build-info-go/utils/pythonutils" "github.com/jfrog/gofrog/datastructures" + "github.com/jfrog/jfrog-cli-core/v2/artifactory/utils" + "github.com/jfrog/jfrog-cli-core/v2/utils/config" "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" "github.com/jfrog/jfrog-cli-core/v2/xray/commands/audit/sca" _go "github.com/jfrog/jfrog-cli-core/v2/xray/commands/audit/sca/go" @@ -16,104 +21,162 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/xray/commands/audit/sca/yarn" "github.com/jfrog/jfrog-cli-core/v2/xray/scangraph" xrayutils "github.com/jfrog/jfrog-cli-core/v2/xray/utils" + "github.com/jfrog/jfrog-client-go/artifactory/services/fspatterns" clientutils "github.com/jfrog/jfrog-client-go/utils" "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/log" + "github.com/jfrog/jfrog-client-go/xray/services" xrayCmdUtils "github.com/jfrog/jfrog-client-go/xray/services/utils" - "os" - "time" ) -func runScaScan(params *AuditParams, results *Results) (err error) { - rootDir, err := os.Getwd() +var DefaultExcludePatterns = []string{"*node_modules*", "*target*", "*venv*", "*test*"} + +func runScaScan(params *AuditParams, results *xrayutils.Results) (err error) { + // Prepare + currentWorkingDir, err := os.Getwd() if errorutils.CheckError(err) != nil { return } - for _, wd := range params.workingDirs { - if len(params.workingDirs) > 1 { - log.Info("Running SCA scan for vulnerable dependencies scan in", wd, "directory...") - } else { - log.Info("Running SCA scan for 
vulnerable dependencies...") - } - wdScanErr := runScaScanOnWorkingDir(params, results, wd, rootDir) - if wdScanErr != nil { - err = errors.Join(err, fmt.Errorf("audit command in '%s' failed:\n%s\n", wd, wdScanErr.Error())) - continue - } - } - return -} - -// Audits the project found in the current directory using Xray. -func runScaScanOnWorkingDir(params *AuditParams, results *Results, workingDir, rootDir string) (err error) { - err = os.Chdir(workingDir) + serverDetails, err := params.ServerDetails() if err != nil { return } - defer func() { - err = errors.Join(err, os.Chdir(rootDir)) - }() - var technologies []string - requestedTechnologies := params.Technologies() - if len(requestedTechnologies) != 0 { - technologies = requestedTechnologies - } else { - technologies = coreutils.DetectedTechnologiesList() - } - if len(technologies) == 0 { + scans := getScaScansToPreform(currentWorkingDir, params) + if len(scans) == 0 { log.Info("Couldn't determine a package manager or build tool used by this project. 
Skipping the SCA scan...") return } - serverDetails, err := params.ServerDetails() + scanInfo, err := coreutils.GetJsonIndent(scans) if err != nil { return } + log.Info(fmt.Sprintf("Performing %d SCA scans:\n%s", len(scans), scanInfo)) - for _, tech := range coreutils.ToTechnologies(technologies) { - if tech == coreutils.Dotnet { + defer func() { + // Make sure to return to the original working directory, executeScaScan may change it + err = errors.Join(err, os.Chdir(currentWorkingDir)) + }() + for _, scan := range scans { + // Run the scan + log.Info("Running SCA scan for", scan.Technology, "vulnerable dependencies in", scan.WorkingDirectory, "directory...") + if wdScanErr := executeScaScan(serverDetails, params, scan); wdScanErr != nil { + err = errors.Join(err, fmt.Errorf("audit command in '%s' failed:\n%s", scan.WorkingDirectory, wdScanErr.Error())) continue } - flattenTree, fullDependencyTrees, techErr := GetTechDependencyTree(params.AuditBasicParams, tech) - if techErr != nil { - err = errors.Join(err, fmt.Errorf("failed while building '%s' dependency tree:\n%s\n", tech, techErr.Error())) + // Add the scan to the results + results.ScaResults = append(results.ScaResults, *scan) + } + return +} + +// Calculate the scans to perform +func getScaScansToPreform(currentWorkingDir string, params *AuditParams) (scansToPreform []*xrayutils.ScaScanResult) { + recursive := len(currentWorkingDir) > 0 + for _, requestedDirectory := range getRequestedDirectoriesToScan(currentWorkingDir, params) { + // Detect descriptors and technologies in the requested directory. 
+ techToWorkingDirs, err := coreutils.DetectTechnologiesDescriptors(requestedDirectory, recursive, params.Technologies(), getRequestedDescriptors(params), getExcludePattern(params, recursive)) + if err != nil { + log.Warn("Couldn't detect technologies in", requestedDirectory, "directory.", err.Error()) continue } - if flattenTree == nil || len(flattenTree.Nodes) == 0 { - err = errors.Join(err, errors.New("no dependencies were found. Please try to build your project and re-run the audit command")) - continue + // Create scans to preform + for tech, workingDirs := range techToWorkingDirs { + if tech == coreutils.Dotnet { + // We detect Dotnet and Nuget the same way, if one detected so does the other. + // We don't need to scan for both and get duplicate results. + continue + } + if len(workingDirs) == 0 { + // Requested technology (from params) descriptors/indicators was not found, scan only requested directory for this technology. + scansToPreform = append(scansToPreform, &xrayutils.ScaScanResult{WorkingDirectory: requestedDirectory, Technology: tech}) + } + for workingDir, descriptors := range workingDirs { + // Add scan for each detected working directory. + scansToPreform = append(scansToPreform, &xrayutils.ScaScanResult{WorkingDirectory: workingDir, Technology: tech, Descriptors: descriptors}) + } } + } + return +} - scanGraphParams := scangraph.NewScanGraphParams(). - SetServerDetails(serverDetails). - SetXrayGraphScanParams(params.xrayGraphScanParams). - SetXrayVersion(params.xrayVersion). - SetFixableOnly(params.fixableOnly). 
- SetSeverityLevel(params.minSeverityFilter) - techResults, techErr := sca.RunXrayDependenciesTreeScanGraph(flattenTree, params.Progress(), tech, scanGraphParams) - if techErr != nil { - err = errors.Join(err, fmt.Errorf("'%s' Xray dependency tree scan request failed:\n%s\n", tech, techErr.Error())) - continue - } - techResults = sca.BuildImpactPathsForScanResponse(techResults, fullDependencyTrees) +func getRequestedDescriptors(params *AuditParams) map[coreutils.Technology][]string { + requestedDescriptors := map[coreutils.Technology][]string{} + if params.PipRequirementsFile() != "" { + requestedDescriptors[coreutils.Pip] = []string{params.PipRequirementsFile()} + } + return requestedDescriptors +} - var dependenciesForApplicabilityScan []string - if shouldUseAllDependencies(params.thirdPartyApplicabilityScan, tech) { - dependenciesForApplicabilityScan = getDirectDependenciesFromTree([]*xrayCmdUtils.GraphNode{flattenTree}) - } else { - dependenciesForApplicabilityScan = getDirectDependenciesFromTree(fullDependencyTrees) - } - params.AppendDependenciesForApplicabilityScan(dependenciesForApplicabilityScan) +func getExcludePattern(params *AuditParams, recursive bool) string { + exclusions := params.Exclusions() + if len(exclusions) == 0 { + exclusions = append(exclusions, DefaultExcludePatterns...) + } + return fspatterns.PrepareExcludePathPattern(exclusions, clientutils.WildCardPattern, recursive) +} - results.ExtendedScanResults.XrayResults = append(results.ExtendedScanResults.XrayResults, techResults...) 
- if !results.IsMultipleRootProject { - results.IsMultipleRootProject = len(fullDependencyTrees) > 1 - } - results.ExtendedScanResults.ScannedTechnologies = append(results.ExtendedScanResults.ScannedTechnologies, tech) +func getRequestedDirectoriesToScan(currentWorkingDir string, params *AuditParams) []string { + workingDirs := datastructures.MakeSet[string]() + for _, wd := range params.workingDirs { + workingDirs.Add(wd) + } + if workingDirs.Size() == 0 { + workingDirs.Add(currentWorkingDir) + } + return workingDirs.ToSlice() +} + +// Perform the SCA scan for the given scan information. +// This method will change the working directory to the scan's working directory. +func executeScaScan(serverDetails *config.ServerDetails, params *AuditParams, scan *xrayutils.ScaScanResult) (err error) { + // Get the dependency tree for the technology in the working directory. + if err = os.Chdir(scan.WorkingDirectory); err != nil { + return errorutils.CheckError(err) + } + flattenTree, fullDependencyTrees, techErr := GetTechDependencyTree(params.AuditBasicParams, scan.Technology) + if techErr != nil { + return fmt.Errorf("failed while building '%s' dependency tree:\n%s", scan.Technology, techErr.Error()) + } + if flattenTree == nil || len(flattenTree.Nodes) == 0 { + return errorutils.CheckErrorf("no dependencies were found. Please try to build your project and re-run the audit command") } + // Scan the dependency tree. + scanResults, xrayErr := runScaWithTech(scan.Technology, params, serverDetails, flattenTree, fullDependencyTrees) + if xrayErr != nil { + return fmt.Errorf("'%s' Xray dependency tree scan request failed:\n%s", scan.Technology, xrayErr.Error()) + } + scan.IsMultipleRootProject = clientutils.Pointer(len(fullDependencyTrees) > 1) + addThirdPartyDependenciesToParams(params, scan.Technology, flattenTree, fullDependencyTrees) + scan.XrayResults = append(scan.XrayResults, scanResults...) 
+ return +} + +func runScaWithTech(tech coreutils.Technology, params *AuditParams, serverDetails *config.ServerDetails, flatTree *xrayCmdUtils.GraphNode, fullDependencyTrees []*xrayCmdUtils.GraphNode) (techResults []services.ScanResponse, err error) { + scanGraphParams := scangraph.NewScanGraphParams(). + SetServerDetails(serverDetails). + SetXrayGraphScanParams(params.xrayGraphScanParams). + SetXrayVersion(params.xrayVersion). + SetFixableOnly(params.fixableOnly). + SetSeverityLevel(params.minSeverityFilter) + techResults, err = sca.RunXrayDependenciesTreeScanGraph(flatTree, params.Progress(), tech, scanGraphParams) + if err != nil { + return + } + techResults = sca.BuildImpactPathsForScanResponse(techResults, fullDependencyTrees) return } +func addThirdPartyDependenciesToParams(params *AuditParams, tech coreutils.Technology, flatTree *xrayCmdUtils.GraphNode, fullDependencyTrees []*xrayCmdUtils.GraphNode) { + var dependenciesForApplicabilityScan []string + if shouldUseAllDependencies(params.thirdPartyApplicabilityScan, tech) { + dependenciesForApplicabilityScan = getDirectDependenciesFromTree([]*xrayCmdUtils.GraphNode{flatTree}) + } else { + dependenciesForApplicabilityScan = getDirectDependenciesFromTree(fullDependencyTrees) + } + params.AppendDependenciesForApplicabilityScan(dependenciesForApplicabilityScan) +} + // When building pip dependency tree using pipdeptree, some of the direct dependencies are recognized as transitive and missed by the CA scanner. // Our solution for this case is to send all dependencies to the CA scanner. // When thirdPartyApplicabilityScan is true, use flatten graph to include all the dependencies in applicability scanning. 
@@ -143,6 +206,10 @@ func GetTechDependencyTree(params xrayutils.AuditParams, tech coreutils.Technolo if err != nil { return } + err = utils.SetResolutionRepoIfExists(params, tech) + if err != nil { + return + } var uniqueDeps []string startTime := time.Now() switch tech { diff --git a/xray/commands/audit/scarunner_test.go b/xray/commands/audit/scarunner_test.go index 3e3d198d7..758db9d3c 100644 --- a/xray/commands/audit/scarunner_test.go +++ b/xray/commands/audit/scarunner_test.go @@ -1,9 +1,17 @@ package audit import ( + "os" + "path/filepath" + "sort" + "testing" + + "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" + xrayutils "github.com/jfrog/jfrog-cli-core/v2/xray/utils" + "github.com/jfrog/jfrog-client-go/utils/io/fileutils" + xrayUtils "github.com/jfrog/jfrog-client-go/xray/services/utils" "github.com/stretchr/testify/assert" - "testing" ) func TestGetDirectDependenciesList(t *testing.T) { @@ -42,3 +50,220 @@ func TestGetDirectDependenciesList(t *testing.T) { assert.ElementsMatch(t, test.expectedResult, result) } } + +func createTestDir(t *testing.T) (directory string, cleanUp func()) { + tmpDir, err := fileutils.CreateTempDir() + assert.NoError(t, err) + + // Temp dir structure: + // tempDir + // ├── dir + // │ ├── maven + // │ │ ├── maven-sub + // │ │ └── maven-sub + // │ ├── npm + // │ └── go + // ├── yarn + // │ ├── Pip + // │ └── Pipenv + // └── Nuget + // ├── Nuget-sub + + dir := createEmptyDir(t, filepath.Join(tmpDir, "dir")) + // Maven + maven := createEmptyDir(t, filepath.Join(dir, "maven")) + createEmptyFile(t, filepath.Join(maven, "pom.xml")) + mavenSub := createEmptyDir(t, filepath.Join(maven, "maven-sub")) + createEmptyFile(t, filepath.Join(mavenSub, "pom.xml")) + mavenSub2 := createEmptyDir(t, filepath.Join(maven, "maven-sub2")) + createEmptyFile(t, filepath.Join(mavenSub2, "pom.xml")) + // Npm + npm := createEmptyDir(t, filepath.Join(dir, "npm")) + createEmptyFile(t, filepath.Join(npm, "package.json")) + createEmptyFile(t, 
filepath.Join(npm, "package-lock.json")) + // Go + goDir := createEmptyDir(t, filepath.Join(dir, "go")) + createEmptyFile(t, filepath.Join(goDir, "go.mod")) + // Yarn + yarn := createEmptyDir(t, filepath.Join(tmpDir, "yarn")) + createEmptyFile(t, filepath.Join(yarn, "package.json")) + createEmptyFile(t, filepath.Join(yarn, "yarn.lock")) + // Pip + pip := createEmptyDir(t, filepath.Join(yarn, "Pip")) + createEmptyFile(t, filepath.Join(pip, "requirements.txt")) + // Pipenv + pipenv := createEmptyDir(t, filepath.Join(yarn, "Pipenv")) + createEmptyFile(t, filepath.Join(pipenv, "Pipfile")) + createEmptyFile(t, filepath.Join(pipenv, "Pipfile.lock")) + // Nuget + nuget := createEmptyDir(t, filepath.Join(tmpDir, "Nuget")) + createEmptyFile(t, filepath.Join(nuget, "project.sln")) + nugetSub := createEmptyDir(t, filepath.Join(nuget, "Nuget-sub")) + createEmptyFile(t, filepath.Join(nugetSub, "project.csproj")) + + return tmpDir, func() { + assert.NoError(t, fileutils.RemoveTempDir(tmpDir), "Couldn't removeAll: "+tmpDir) + } +} + +func createEmptyDir(t *testing.T, path string) string { + assert.NoError(t, fileutils.CreateDirIfNotExist(path)) + return path +} + +func createEmptyFile(t *testing.T, path string) { + file, err := os.Create(path) + assert.NoError(t, err) + assert.NoError(t, file.Close()) +} + +func TestGetExcludePattern(t *testing.T) { + tests := []struct { + name string + params func() *AuditParams + recursive bool + expected string + }{ + { + name: "Test exclude pattern recursive", + params: func() *AuditParams { + param := NewAuditParams() + param.SetExclusions([]string{"exclude1", "exclude2"}) + return param + }, + recursive: true, + expected: "(^exclude1$)|(^exclude2$)", + }, + { + name: "Test no exclude pattern recursive", + params: NewAuditParams, + recursive: true, + expected: "(^.*node_modules.*$)|(^.*target.*$)|(^.*venv.*$)|(^.*test.*$)", + }, + { + name: "Test exclude pattern not recursive", + params: func() *AuditParams { + param := NewAuditParams() + 
param.SetExclusions([]string{"exclude1", "exclude2"}) + return param + }, + recursive: false, + expected: "(^exclude1$)|(^exclude2$)", + }, + { + name: "Test no exclude pattern", + params: NewAuditParams, + recursive: false, + expected: "(^.*node_modules.*$)|(^.*target.*$)|(^.*venv.*$)|(^.*test.*$)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := getExcludePattern(test.params(), test.recursive) + assert.Equal(t, test.expected, result) + }) + } +} + +func TestGetScaScansToPreform(t *testing.T) { + + dir, cleanUp := createTestDir(t) + + tests := []struct { + name string + wd string + params func() *AuditParams + expected []*xrayutils.ScaScanResult + }{ + { + name: "Test specific technologies", + wd: dir, + params: func() *AuditParams { + param := NewAuditParams() + param.SetTechnologies([]string{"maven", "npm", "go"}) + return param + }, + expected: []*xrayutils.ScaScanResult{ + { + Technology: coreutils.Maven, + WorkingDirectory: filepath.Join(dir, "dir", "maven"), + Descriptors: []string{ + filepath.Join(dir, "dir", "maven", "pom.xml"), + filepath.Join(dir, "dir", "maven", "maven-sub", "pom.xml"), + filepath.Join(dir, "dir", "maven", "maven-sub2", "pom.xml"), + }, + }, + { + Technology: coreutils.Npm, + WorkingDirectory: filepath.Join(dir, "dir", "npm"), + Descriptors: []string{filepath.Join(dir, "dir", "npm", "package.json")}, + }, + { + Technology: coreutils.Go, + WorkingDirectory: filepath.Join(dir, "dir", "go"), + Descriptors: []string{filepath.Join(dir, "dir", "go", "go.mod")}, + }, + }, + }, + { + name: "Test all", + wd: dir, + params: NewAuditParams, + expected: []*xrayutils.ScaScanResult{ + { + Technology: coreutils.Maven, + WorkingDirectory: filepath.Join(dir, "dir", "maven"), + Descriptors: []string{ + filepath.Join(dir, "dir", "maven", "pom.xml"), + filepath.Join(dir, "dir", "maven", "maven-sub", "pom.xml"), + filepath.Join(dir, "dir", "maven", "maven-sub2", "pom.xml"), + }, + }, + { + Technology: 
coreutils.Npm, + WorkingDirectory: filepath.Join(dir, "dir", "npm"), + Descriptors: []string{filepath.Join(dir, "dir", "npm", "package.json")}, + }, + { + Technology: coreutils.Go, + WorkingDirectory: filepath.Join(dir, "dir", "go"), + Descriptors: []string{filepath.Join(dir, "dir", "go", "go.mod")}, + }, + { + Technology: coreutils.Yarn, + WorkingDirectory: filepath.Join(dir, "yarn"), + Descriptors: []string{filepath.Join(dir, "yarn", "package.json")}, + }, + { + Technology: coreutils.Pip, + WorkingDirectory: filepath.Join(dir, "yarn", "Pip"), + Descriptors: []string{filepath.Join(dir, "yarn", "Pip", "requirements.txt")}, + }, + { + Technology: coreutils.Pipenv, + WorkingDirectory: filepath.Join(dir, "yarn", "Pipenv"), + Descriptors: []string{filepath.Join(dir, "yarn", "Pipenv", "Pipfile")}, + }, + { + Technology: coreutils.Nuget, + WorkingDirectory: filepath.Join(dir, "Nuget"), + Descriptors: []string{filepath.Join(dir, "Nuget", "project.sln"), filepath.Join(dir, "Nuget", "Nuget-sub", "project.csproj")}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := getScaScansToPreform(test.wd, test.params()) + for i := range result { + sort.Strings(result[i].Descriptors) + sort.Strings(test.expected[i].Descriptors) + } + assert.ElementsMatch(t, test.expected, result) + }) + } + + cleanUp() +} diff --git a/xray/commands/scan/buildscan.go b/xray/commands/scan/buildscan.go index d44a9dcb2..5980f6a84 100644 --- a/xray/commands/scan/buildscan.go +++ b/xray/commands/scan/buildscan.go @@ -126,9 +126,11 @@ func (bsc *BuildScanCommand) runBuildScanAndPrintResults(xrayManager *xray.XrayS XrayDataUrl: buildScanResults.MoreDetailsUrl, }} - extendedScanResults := &xrutils.ExtendedScanResults{XrayResults: scanResponse, XrayVersion: xrayVersion} + scanResults := xrutils.NewAuditResults() + scanResults.XrayVersion = xrayVersion + scanResults.ScaResults = []xrutils.ScaScanResult{{XrayResults: scanResponse}} - resultsPrinter := 
xrutils.NewResultsWriter(extendedScanResults). + resultsPrinter := xrutils.NewResultsWriter(scanResults). SetOutputFormat(bsc.outputFormat). SetIncludeVulnerabilities(bsc.includeVulnerabilities). SetIncludeLicenses(false). diff --git a/xray/commands/scan/scan.go b/xray/commands/scan/scan.go index 7a291a21c..0747ca48a 100644 --- a/xray/commands/scan/scan.go +++ b/xray/commands/scan/scan.go @@ -241,9 +241,12 @@ func (scanCmd *ScanCommand) Run() (err error) { } scanErrors = appendErrorSlice(scanErrors, fileProducerErrors) scanErrors = appendErrorSlice(scanErrors, indexedFileProducerErrors) - extendedScanResults := &xrutils.ExtendedScanResults{XrayResults: flatResults, XrayVersion: xrayVersion} - if err = xrutils.NewResultsWriter(extendedScanResults). + scanResults := xrutils.NewAuditResults() + scanResults.XrayVersion = xrayVersion + scanResults.ScaResults = []xrutils.ScaScanResult{{XrayResults: flatResults}} + + if err = xrutils.NewResultsWriter(scanResults). SetOutputFormat(scanCmd.outputFormat). SetIncludeVulnerabilities(scanCmd.includeVulnerabilities). SetIncludeLicenses(scanCmd.includeLicenses). 
@@ -410,7 +413,7 @@ func collectPatternMatchingFiles(fileData spec.File, rootPath string, dataHandle return err } - paths, err := fspatterns.ListFiles(rootPath, recursive, false, false, excludePathPattern) + paths, err := fspatterns.ListFiles(rootPath, recursive, false, false, false, excludePathPattern) if err != nil { return err } diff --git a/xray/utils/analyzermanager.go b/xray/utils/analyzermanager.go index cf894d660..bec032c7e 100644 --- a/xray/utils/analyzermanager.go +++ b/xray/utils/analyzermanager.go @@ -14,8 +14,6 @@ import ( "github.com/jfrog/jfrog-client-go/utils/errorutils" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" "github.com/jfrog/jfrog-client-go/utils/log" - "github.com/jfrog/jfrog-client-go/xray/services" - "github.com/owenrumney/go-sarif/v2/sarif" ) const ( @@ -78,22 +76,6 @@ var exitCodeErrorsMap = map[int]string{ unsupportedOsExitCode: "got unsupported operating system error from analyzer manager", } -type ExtendedScanResults struct { - XrayResults []services.ScanResponse - XrayVersion string - ScannedTechnologies []coreutils.Technology - - ApplicabilityScanResults []*sarif.Run - SecretsScanResults []*sarif.Run - IacScanResults []*sarif.Run - SastScanResults []*sarif.Run - EntitledForJas bool -} - -func (e *ExtendedScanResults) getXrayScanResults() []services.ScanResponse { - return e.XrayResults -} - type AnalyzerManager struct { AnalyzerManagerFullPath string MultiScanId string diff --git a/xray/utils/auditbasicparams.go b/xray/utils/auditbasicparams.go index e5c739517..997c09006 100644 --- a/xray/utils/auditbasicparams.go +++ b/xray/utils/auditbasicparams.go @@ -23,6 +23,7 @@ type AuditParams interface { Progress() ioUtils.ProgressMgr SetProgress(progress ioUtils.ProgressMgr) Args() []string + InstallCommandArgs() []string SetNpmScope(depType string) *AuditBasicParams OutputFormat() OutputFormat DepsRepo() string @@ -44,6 +45,7 @@ type AuditBasicParams struct { args []string depsRepo string ignoreConfigFile bool + 
installCommandArgs []string } func (abp *AuditBasicParams) DirectDependencies() []string { @@ -64,6 +66,10 @@ func (abp *AuditBasicParams) SetServerDetails(serverDetails *config.ServerDetail return abp } +func (abp *AuditBasicParams) SetInstallCommandArgs(installCommandArgs []string) *AuditBasicParams { + abp.installCommandArgs = installCommandArgs + return abp +} func (abp *AuditBasicParams) PipRequirementsFile() string { return abp.pipRequirementsFile } @@ -121,6 +127,10 @@ func (abp *AuditBasicParams) Args() []string { return abp.args } +func (abp *AuditBasicParams) InstallCommandArgs() []string { + return abp.installCommandArgs +} + func (abp *AuditBasicParams) SetNpmScope(depType string) *AuditBasicParams { switch depType { case "devOnly": diff --git a/xray/utils/results.go b/xray/utils/results.go new file mode 100644 index 000000000..c298ab54f --- /dev/null +++ b/xray/utils/results.go @@ -0,0 +1,100 @@ +package utils + +import ( + "github.com/jfrog/gofrog/datastructures" + "github.com/jfrog/jfrog-cli-core/v2/utils/coreutils" + "github.com/jfrog/jfrog-client-go/xray/services" + "github.com/owenrumney/go-sarif/v2/sarif" +) + +type Results struct { + ScaResults []ScaScanResult + XrayVersion string + ScaError error + + ExtendedScanResults *ExtendedScanResults + JasError error +} + +func NewAuditResults() *Results { + return &Results{ExtendedScanResults: &ExtendedScanResults{}} +} + +func (r *Results) GetScaScansXrayResults() (results []services.ScanResponse) { + for _, scaResult := range r.ScaResults { + results = append(results, scaResult.XrayResults...) 
+ } + return +} + +func (r *Results) GetScaScannedTechnologies() []coreutils.Technology { + technologies := datastructures.MakeSet[coreutils.Technology]() + for _, scaResult := range r.ScaResults { + technologies.Add(scaResult.Technology) + } + return technologies.ToSlice() +} + +func (r *Results) IsMultipleProject() bool { + if len(r.ScaResults) == 0 { + return false + } + if len(r.ScaResults) == 1 { + if r.ScaResults[0].IsMultipleRootProject == nil { + return false + } + return *r.ScaResults[0].IsMultipleRootProject + } + return true +} + +func (r *Results) IsScaIssuesFound() bool { + for _, scan := range r.ScaResults { + if scan.HasInformation() { + return true + } + } + return false +} + +func (r *Results) IsIssuesFound() bool { + if r.IsScaIssuesFound() { + return true + } + if r.ExtendedScanResults.IsIssuesFound() { + return true + } + return false +} + +type ScaScanResult struct { + Technology coreutils.Technology `json:"Technology"` + WorkingDirectory string `json:"WorkingDirectory"` + XrayResults []services.ScanResponse `json:"XrayResults,omitempty"` + Descriptors []string `json:"Descriptors,omitempty"` + IsMultipleRootProject *bool `json:"IsMultipleRootProject,omitempty"` +} + +func (s ScaScanResult) HasInformation() bool { + for _, scan := range s.XrayResults { + if len(scan.Vulnerabilities) > 0 || len(scan.Violations) > 0 || len(scan.Licenses) > 0 { + return true + } + } + return false +} + +type ExtendedScanResults struct { + ApplicabilityScanResults []*sarif.Run + SecretsScanResults []*sarif.Run + IacScanResults []*sarif.Run + SastScanResults []*sarif.Run + EntitledForJas bool +} + +func (e *ExtendedScanResults) IsIssuesFound() bool { + return GetResultsLocationCount(e.ApplicabilityScanResults...) > 0 || + GetResultsLocationCount(e.SecretsScanResults...) > 0 || + GetResultsLocationCount(e.IacScanResults...) > 0 || + GetResultsLocationCount(e.SastScanResults...) 
> 0 +} diff --git a/xray/utils/resultstable.go b/xray/utils/resultstable.go index 0b308b9ff..0586bf680 100644 --- a/xray/utils/resultstable.go +++ b/xray/utils/resultstable.go @@ -37,8 +37,8 @@ const ( // In case one (or more) of the violations contains the field FailBuild set to true, CliError with exit code 3 will be returned. // Set printExtended to true to print fields with 'extended' tag. // If the scan argument is set to true, print the scan tables. -func PrintViolationsTable(violations []services.Violation, extendedResults *ExtendedScanResults, multipleRoots, printExtended bool, scanType services.ScanType) error { - securityViolationsRows, licenseViolationsRows, operationalRiskViolationsRows, err := prepareViolations(violations, extendedResults, multipleRoots, true, true) +func PrintViolationsTable(violations []services.Violation, results *Results, multipleRoots, printExtended bool, scanType services.ScanType) error { + securityViolationsRows, licenseViolationsRows, operationalRiskViolationsRows, err := prepareViolations(violations, results, multipleRoots, true, true) if err != nil { return err } @@ -72,11 +72,11 @@ func PrintViolationsTable(violations []services.Violation, extendedResults *Exte } // Prepare violations for all non-table formats (without style or emoji) -func PrepareViolations(violations []services.Violation, extendedResults *ExtendedScanResults, multipleRoots, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, []formats.LicenseRow, []formats.OperationalRiskViolationRow, error) { - return prepareViolations(violations, extendedResults, multipleRoots, false, simplifiedOutput) +func PrepareViolations(violations []services.Violation, results *Results, multipleRoots, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, []formats.LicenseRow, []formats.OperationalRiskViolationRow, error) { + return prepareViolations(violations, results, multipleRoots, false, simplifiedOutput) } -func prepareViolations(violations 
[]services.Violation, extendedResults *ExtendedScanResults, multipleRoots, isTable, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, []formats.LicenseRow, []formats.OperationalRiskViolationRow, error) { +func prepareViolations(violations []services.Violation, results *Results, multipleRoots, isTable, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, []formats.LicenseRow, []formats.OperationalRiskViolationRow, error) { if simplifiedOutput { violations = simplifyViolations(violations, multipleRoots) } @@ -91,12 +91,12 @@ func prepareViolations(violations []services.Violation, extendedResults *Extende switch violation.ViolationType { case "security": cves := convertCves(violation.Cves) - if extendedResults.EntitledForJas { + if results.ExtendedScanResults.EntitledForJas { for i := range cves { - cves[i].Applicability = getCveApplicabilityField(cves[i], extendedResults.ApplicabilityScanResults, violation.Components) + cves[i].Applicability = getCveApplicabilityField(cves[i], results.ExtendedScanResults.ApplicabilityScanResults, violation.Components) } } - applicabilityStatus := getApplicableCveStatus(extendedResults.EntitledForJas, extendedResults.ApplicabilityScanResults, cves) + applicabilityStatus := getApplicableCveStatus(results.ExtendedScanResults.EntitledForJas, results.ExtendedScanResults.ApplicabilityScanResults, cves) currSeverity := GetSeverity(violation.Severity, applicabilityStatus) jfrogResearchInfo := convertJfrogResearchInformation(violation.ExtendedInformation) for compIndex := 0; compIndex < len(impactedPackagesNames); compIndex++ { @@ -182,8 +182,8 @@ func prepareViolations(violations []services.Violation, extendedResults *Extende // In case multipleRoots is true, the field Component will show the root of each impact path, otherwise it will show the root's child. // Set printExtended to true to print fields with 'extended' tag. // If the scan argument is set to true, print the scan tables. 
-func PrintVulnerabilitiesTable(vulnerabilities []services.Vulnerability, extendedResults *ExtendedScanResults, multipleRoots, printExtended bool, scanType services.ScanType) error { - vulnerabilitiesRows, err := prepareVulnerabilities(vulnerabilities, extendedResults, multipleRoots, true, true) +func PrintVulnerabilitiesTable(vulnerabilities []services.Vulnerability, results *Results, multipleRoots, printExtended bool, scanType services.ScanType) error { + vulnerabilitiesRows, err := prepareVulnerabilities(vulnerabilities, results, multipleRoots, true, true) if err != nil { return err } @@ -192,7 +192,7 @@ func PrintVulnerabilitiesTable(vulnerabilities []services.Vulnerability, extende return coreutils.PrintTable(formats.ConvertToVulnerabilityScanTableRow(vulnerabilitiesRows), "Vulnerable Components", "✨ No vulnerable components were found ✨", printExtended) } var emptyTableMessage string - if len(extendedResults.ScannedTechnologies) > 0 { + if len(results.ScaResults) > 0 { emptyTableMessage = "✨ No vulnerable dependencies were found ✨" } else { emptyTableMessage = coreutils.PrintYellow("🔧 Couldn't determine a package manager or build tool used by this project 🔧") @@ -201,11 +201,11 @@ func PrintVulnerabilitiesTable(vulnerabilities []services.Vulnerability, extende } // Prepare vulnerabilities for all non-table formats (without style or emoji) -func PrepareVulnerabilities(vulnerabilities []services.Vulnerability, extendedResults *ExtendedScanResults, multipleRoots, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, error) { - return prepareVulnerabilities(vulnerabilities, extendedResults, multipleRoots, false, simplifiedOutput) +func PrepareVulnerabilities(vulnerabilities []services.Vulnerability, results *Results, multipleRoots, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, error) { + return prepareVulnerabilities(vulnerabilities, results, multipleRoots, false, simplifiedOutput) } -func prepareVulnerabilities(vulnerabilities 
[]services.Vulnerability, extendedResults *ExtendedScanResults, multipleRoots, isTable, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, error) { +func prepareVulnerabilities(vulnerabilities []services.Vulnerability, results *Results, multipleRoots, isTable, simplifiedOutput bool) ([]formats.VulnerabilityOrViolationRow, error) { if simplifiedOutput { vulnerabilities = simplifyVulnerabilities(vulnerabilities, multipleRoots) } @@ -216,12 +216,12 @@ func prepareVulnerabilities(vulnerabilities []services.Vulnerability, extendedRe return nil, err } cves := convertCves(vulnerability.Cves) - if extendedResults.EntitledForJas { + if results.ExtendedScanResults.EntitledForJas { for i := range cves { - cves[i].Applicability = getCveApplicabilityField(cves[i], extendedResults.ApplicabilityScanResults, vulnerability.Components) + cves[i].Applicability = getCveApplicabilityField(cves[i], results.ExtendedScanResults.ApplicabilityScanResults, vulnerability.Components) } } - applicabilityStatus := getApplicableCveStatus(extendedResults.EntitledForJas, extendedResults.ApplicabilityScanResults, cves) + applicabilityStatus := getApplicableCveStatus(results.ExtendedScanResults.EntitledForJas, results.ExtendedScanResults.ApplicabilityScanResults, cves) currSeverity := GetSeverity(vulnerability.Severity, applicabilityStatus) jfrogResearchInfo := convertJfrogResearchInformation(vulnerability.ExtendedInformation) for compIndex := 0; compIndex < len(impactedPackagesNames); compIndex++ { diff --git a/xray/utils/resultstable_test.go b/xray/utils/resultstable_test.go index d0589fe24..14d315b00 100644 --- a/xray/utils/resultstable_test.go +++ b/xray/utils/resultstable_test.go @@ -25,7 +25,7 @@ func TestPrintViolationsTable(t *testing.T) { } for _, test := range tests { - err := PrintViolationsTable(test.violations, &ExtendedScanResults{}, false, true, services.Binary) + err := PrintViolationsTable(test.violations, NewAuditResults(), false, true, services.Binary) assert.NoError(t, 
err) if CheckIfFailBuild([]services.ScanResponse{{Violations: test.violations}}) { err = NewFailBuildError() diff --git a/xray/utils/resultwriter.go b/xray/utils/resultwriter.go index 35a8fafd4..5d7702841 100644 --- a/xray/utils/resultwriter.go +++ b/xray/utils/resultwriter.go @@ -38,7 +38,7 @@ var CurationOutputFormats = []string{string(Table), string(Json)} type ResultsWriter struct { // The scan results. - results *ExtendedScanResults + results *Results // SimpleJsonError Errors to be added to output of the SimpleJson format. simpleJsonError []formats.SimpleJsonError // Format The output format. @@ -57,8 +57,8 @@ type ResultsWriter struct { messages []string } -func NewResultsWriter(extendedScanResults *ExtendedScanResults) *ResultsWriter { - return &ResultsWriter{results: extendedScanResults} +func NewResultsWriter(scanResults *Results) *ResultsWriter { + return &ResultsWriter{results: scanResults} } func (rw *ResultsWriter) SetOutputFormat(format OutputFormat) *ResultsWriter { @@ -115,7 +115,7 @@ func (rw *ResultsWriter) PrintScanResults() error { } return PrintJson(jsonTable) case Json: - return PrintJson(rw.results.getXrayScanResults()) + return PrintJson(rw.results.GetScaScansXrayResults()) case Sarif: sarifReport, err := GenereateSarifReportFromResults(rw.results, rw.isMultipleRoots, rw.includeLicenses) if err != nil { @@ -131,8 +131,8 @@ func (rw *ResultsWriter) PrintScanResults() error { } func (rw *ResultsWriter) printScanResultsTables() (err error) { printMessages(rw.messages) - violations, vulnerabilities, licenses := SplitScanResults(rw.results.getXrayScanResults()) - if len(rw.results.getXrayScanResults()) > 0 { + violations, vulnerabilities, licenses := SplitScanResults(rw.results.ScaResults) + if rw.results.IsIssuesFound() { var resultsPath string if resultsPath, err = writeJsonResults(rw.results); err != nil { return @@ -153,13 +153,13 @@ func (rw *ResultsWriter) printScanResultsTables() (err error) { return } } - if err = 
PrintSecretsTable(rw.results.SecretsScanResults, rw.results.EntitledForJas); err != nil { + if err = PrintSecretsTable(rw.results.ExtendedScanResults.SecretsScanResults, rw.results.ExtendedScanResults.EntitledForJas); err != nil { return } - if err = PrintIacTable(rw.results.IacScanResults, rw.results.EntitledForJas); err != nil { + if err = PrintIacTable(rw.results.ExtendedScanResults.IacScanResults, rw.results.ExtendedScanResults.EntitledForJas); err != nil { return } - return PrintSastTable(rw.results.SastScanResults, rw.results.EntitledForJas) + return PrintSastTable(rw.results.ExtendedScanResults.SastScanResults, rw.results.ExtendedScanResults.EntitledForJas) } func printMessages(messages []string) { @@ -175,21 +175,21 @@ func printMessage(message string) { log.Output("💬" + message) } -func GenereateSarifReportFromResults(extendedResults *ExtendedScanResults, isMultipleRoots, includeLicenses bool) (report *sarif.Report, err error) { +func GenereateSarifReportFromResults(results *Results, isMultipleRoots, includeLicenses bool) (report *sarif.Report, err error) { report, err = NewReport() if err != nil { return } - xrayRun, err := convertXrayResponsesToSarifRun(extendedResults, isMultipleRoots, includeLicenses) + xrayRun, err := convertXrayResponsesToSarifRun(results, isMultipleRoots, includeLicenses) if err != nil { return } report.Runs = append(report.Runs, xrayRun) - report.Runs = append(report.Runs, extendedResults.ApplicabilityScanResults...) - report.Runs = append(report.Runs, extendedResults.IacScanResults...) - report.Runs = append(report.Runs, extendedResults.SecretsScanResults...) - report.Runs = append(report.Runs, extendedResults.SastScanResults...) + report.Runs = append(report.Runs, results.ExtendedScanResults.ApplicabilityScanResults...) + report.Runs = append(report.Runs, results.ExtendedScanResults.IacScanResults...) + report.Runs = append(report.Runs, results.ExtendedScanResults.SecretsScanResults...) 
+ report.Runs = append(report.Runs, results.ExtendedScanResults.SastScanResults...) return } @@ -202,13 +202,13 @@ func ConvertSarifReportToString(report *sarif.Report) (sarifStr string, err erro return clientUtils.IndentJson(out), nil } -func convertXrayResponsesToSarifRun(extendedResults *ExtendedScanResults, isMultipleRoots, includeLicenses bool) (run *sarif.Run, err error) { - xrayJson, err := convertXrayScanToSimpleJson(extendedResults, isMultipleRoots, includeLicenses, true) +func convertXrayResponsesToSarifRun(results *Results, isMultipleRoots, includeLicenses bool) (run *sarif.Run, err error) { + xrayJson, err := convertXrayScanToSimpleJson(results, isMultipleRoots, includeLicenses, true) if err != nil { return } xrayRun := sarif.NewRunWithInformationURI("JFrog Xray SCA", BaseDocumentationURL+"sca") - xrayRun.Tool.Driver.Version = &extendedResults.XrayVersion + xrayRun.Tool.Driver.Version = &results.XrayVersion if len(xrayJson.Vulnerabilities) > 0 || len(xrayJson.SecurityViolations) > 0 { if err = extractXrayIssuesToSarifRun(xrayRun, xrayJson); err != nil { return @@ -351,18 +351,18 @@ func addXrayRule(ruleId, ruleDescription, maxCveScore, summary, markdownDescript }) } -func convertXrayScanToSimpleJson(extendedResults *ExtendedScanResults, isMultipleRoots, includeLicenses, simplifiedOutput bool) (formats.SimpleJsonResults, error) { - violations, vulnerabilities, licenses := SplitScanResults(extendedResults.XrayResults) +func convertXrayScanToSimpleJson(results *Results, isMultipleRoots, includeLicenses, simplifiedOutput bool) (formats.SimpleJsonResults, error) { + violations, vulnerabilities, licenses := SplitScanResults(results.ScaResults) jsonTable := formats.SimpleJsonResults{} if len(vulnerabilities) > 0 { - vulJsonTable, err := PrepareVulnerabilities(vulnerabilities, extendedResults, isMultipleRoots, simplifiedOutput) + vulJsonTable, err := PrepareVulnerabilities(vulnerabilities, results, isMultipleRoots, simplifiedOutput) if err != nil { return 
formats.SimpleJsonResults{}, err } jsonTable.Vulnerabilities = vulJsonTable } if len(violations) > 0 { - secViolationsJsonTable, licViolationsJsonTable, opRiskViolationsJsonTable, err := PrepareViolations(violations, extendedResults, isMultipleRoots, simplifiedOutput) + secViolationsJsonTable, licViolationsJsonTable, opRiskViolationsJsonTable, err := PrepareViolations(violations, results, isMultipleRoots, simplifiedOutput) if err != nil { return formats.SimpleJsonResults{}, err } @@ -386,14 +386,14 @@ func (rw *ResultsWriter) convertScanToSimpleJson() (formats.SimpleJsonResults, e if err != nil { return formats.SimpleJsonResults{}, err } - if len(rw.results.SecretsScanResults) > 0 { - jsonTable.Secrets = PrepareSecrets(rw.results.SecretsScanResults) + if len(rw.results.ExtendedScanResults.SecretsScanResults) > 0 { + jsonTable.Secrets = PrepareSecrets(rw.results.ExtendedScanResults.SecretsScanResults) } - if len(rw.results.IacScanResults) > 0 { - jsonTable.Iacs = PrepareIacs(rw.results.IacScanResults) + if len(rw.results.ExtendedScanResults.IacScanResults) > 0 { + jsonTable.Iacs = PrepareIacs(rw.results.ExtendedScanResults.IacScanResults) } - if len(rw.results.SastScanResults) > 0 { - jsonTable.Sast = PrepareSast(rw.results.SastScanResults) + if len(rw.results.ExtendedScanResults.SastScanResults) > 0 { + jsonTable.Sast = PrepareSast(rw.results.ExtendedScanResults.SastScanResults) } jsonTable.Errors = rw.simpleJsonError @@ -483,19 +483,21 @@ func findMaxCVEScore(cves []formats.CveRow) (string, error) { } // Splits scan responses into aggregated lists of violations, vulnerabilities and licenses. 
-func SplitScanResults(results []services.ScanResponse) ([]services.Violation, []services.Vulnerability, []services.License) { +func SplitScanResults(results []ScaScanResult) ([]services.Violation, []services.Vulnerability, []services.License) { var violations []services.Violation var vulnerabilities []services.Vulnerability var licenses []services.License - for _, result := range results { - violations = append(violations, result.Violations...) - vulnerabilities = append(vulnerabilities, result.Vulnerabilities...) - licenses = append(licenses, result.Licenses...) + for _, scan := range results { + for _, result := range scan.XrayResults { + violations = append(violations, result.Violations...) + vulnerabilities = append(vulnerabilities, result.Vulnerabilities...) + licenses = append(licenses, result.Licenses...) + } } return violations, vulnerabilities, licenses } -func writeJsonResults(results *ExtendedScanResults) (resultsPath string, err error) { +func writeJsonResults(results *Results) (resultsPath string, err error) { out, err := fileutils.CreateTempFile() if errorutils.CheckError(err) != nil { return