From 7bcc943077097125e16d705ba1366139ba4c5871 Mon Sep 17 00:00:00 2001
From: Aswin Shakil Balasubramanian
Date: Wed, 18 Dec 2024 11:22:00 -0800
Subject: [PATCH] Revert "Merge branch 'master' into HDDS-10239-container-reconciliation"

This reverts commit 01b44374345fa5fa8d2737390709f41e2b6d21ec.

Conflicts:
	hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java

---
 .github/workflows/ci.yml | 2 +-
 .mvn/extensions.xml | 2 +-
 NOTICE.txt | 2 +-
 dev-support/ci/selective_ci_checks.bats | 12 -
 dev-support/ci/selective_ci_checks.sh | 1 -
 dev-support/pom.xml | 98 -
 .../hadoop/hdds/scm/OzoneClientConfig.java | 73 +-
 .../hdds/scm/client/ClientTrustManager.java | 0
 .../scm/storage/AbstractCommitWatcher.java | 1 +
 .../scm/storage/BlockDataStreamOutput.java | 1 +
 .../hdds/scm/storage/BlockOutputStream.java | 123 +-
 .../hdds/scm/storage/ECBlockOutputStream.java | 34 +-
 .../hdds/scm/storage/ExtendedInputStream.java | 1 +
 .../scm/storage/RatisBlockOutputStream.java | 4 +-
 .../client/io/BlockInputStreamFactory.java | 1 +
 .../io/BlockInputStreamFactoryImpl.java | 1 +
 .../ozone/client/io/ECBlockInputStream.java | 5 +
 .../client/io/ECBlockInputStreamFactory.java | 1 +
 .../io/ECBlockInputStreamFactoryImpl.java | 1 +
 ...ECBlockReconstructedStripeInputStream.java | 2 +-
 .../hdds/scm/TestOzoneClientConfig.java | 40 -
 .../org/apache/hadoop/hdds/HddsUtils.java | 5 +-
 .../org/apache/hadoop/hdds/JavaUtils.java | 14 +-
 .../hdds/annotation/InterfaceStability.java | 6 +-
 .../hadoop/hdds/client/DecommissionUtils.java | 2 +
 .../hadoop/hdds/recon/ReconConfigKeys.java | 6 +-
 .../org/apache/hadoop/hdds/scm/ScmConfig.java | 3 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 4 -
 .../hadoop/hdds/scm/client/ScmClient.java | 3 +-
 .../container/ReplicationManagerReport.java | 2 +
 .../hadoop/hdds/scm/ha/SCMNodeInfo.java | 1 +
 .../hadoop/hdds/scm/net/InnerNodeImpl.java | 5 +-
 .../hdds/scm/net/NetworkTopologyImpl.java | 6 -
 .../hadoop/hdds/scm/pipeline/Pipeline.java | 1 +
 .../StorageContainerLocationProtocol.java | 2 +-
 .../certificate/client/CertificateClient.java | 40 +-
 .../hadoop/hdds/server/ServerUtils.java | 2 +-
 .../hadoop/hdds/utils/LeakDetector.java | 13 +-
 .../hadoop/hdds/utils/db/DelegatedCodec.java | 8 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java | 12 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java | 63 +-
 .../hadoop/ozone/OzoneManagerVersion.java | 2 -
 .../ozone/common/ChecksumByteBufferImpl.java | 14 +-
 .../hadoop/ozone/common/ChunkBuffer.java | 8 +-
 .../container/common/helpers/BlockData.java | 11 -
 .../common/helpers/ChunkInfoList.java | 4 +-
 .../upgrade/LayoutVersionInstanceFactory.java | 14 +-
 .../ozone/upgrade/LayoutVersionManager.java | 1 +
 .../ozone/upgrade/UpgradeFinalizer.java | 12 +-
 .../ozone/upgrade/VersionFactoryKey.java | 2 +-
 .../src/main/resources/ozone-default.xml | 45 +-
 .../hadoop/hdds/conf/ConfigurationSource.java | 2 +-
 .../checksum/DNContainerOperationClient.java | 8 +-
 .../checksum/ReconcileContainerTask.java | 10 -
 .../container/common/impl/ContainerSet.java | 1 +
 .../container/common/impl/HddsDispatcher.java | 2 +-
 .../common/impl/OpenContainerBlockMap.java | 2 +-
 ...ntainerDeletionChoosingPolicyTemplate.java | 2 +-
 .../interfaces/ContainerDispatcher.java | 1 +
 .../statemachine/DatanodeStateMachine.java | 1 +
 .../CloseContainerCommandHandler.java | 15 +-
 .../commandhandler/CommandDispatcher.java | 17 +-
 .../DeleteBlocksCommandHandler.java | 4 +-
 .../DeleteContainerCommandHandler.java | 17 +-
 .../endpoint/HeartbeatEndpointTask.java | 2 +-
.../states/endpoint/RegisterEndpointTask.java | 2 +- .../server/ratis/ContainerStateMachine.java | 18 +- .../server/ratis/XceiverServerRatis.java | 88 +- .../container/common/volume/HddsVolume.java | 2 +- .../container/common/volume/VolumeInfo.java | 17 +- .../container/common/volume/VolumeUsage.java | 6 +- .../ECContainerOperationClient.java | 20 +- .../ECReconstructionCoordinator.java | 15 +- .../ECReconstructionCoordinatorTask.java | 10 - .../container/keyvalue/KeyValueContainer.java | 1 + .../keyvalue/KeyValueContainerData.java | 2 + .../container/keyvalue/KeyValueHandler.java | 14 +- .../keyvalue/helpers/BlockUtils.java | 1 + .../keyvalue/helpers/ChunkUtils.java | 70 +- .../helpers/KeyValueContainerUtil.java | 5 +- .../keyvalue/impl/BlockManagerImpl.java | 9 - .../keyvalue/impl/ChunkManagerFactory.java | 1 + .../keyvalue/impl/FilePerBlockStrategy.java | 14 +- .../keyvalue/impl/FilePerChunkStrategy.java | 12 +- .../keyvalue/impl/MappedBufferManager.java | 122 - .../keyvalue/impl/StreamDataChannelBase.java | 4 +- .../keyvalue/interfaces/BlockManager.java | 3 - .../metadata/AbstractDatanodeStore.java | 8 +- .../metadata/SchemaOneChunkInfoListCodec.java | 3 +- .../ozoneimpl/ContainerController.java | 1 + .../container/ozoneimpl/OzoneContainer.java | 4 +- .../replication/AbstractReplicationTask.java | 4 - .../replication/ReplicationSupervisor.java | 89 +- .../ReplicationSupervisorMetrics.java | 33 +- .../replication/ReplicationTask.java | 10 - .../container/stream/StreamingSource.java | 4 +- .../ReconstructECContainersCommand.java | 4 +- ...atanodeProtocolClientSideTranslatorPB.java | 1 + .../TestCloseContainerCommandHandler.java | 27 - .../TestDeleteContainerCommandHandler.java | 59 +- .../keyvalue/helpers/TestChunkUtils.java | 4 +- .../impl/AbstractTestChunkManager.java | 56 - .../impl/CommonChunkManagerTestCases.java | 25 - .../impl/TestMappedBufferManager.java | 46 - .../TestReplicationSupervisor.java | 221 +- ...estReconstructionECContainersCommands.java | 13 +- .../dev-support/checkstyle/checkstyle.xml | 1 - hadoop-hdds/docs/content/concept/Datanodes.md | 12 - .../docs/content/concept/Datanodes.zh.md | 12 - .../hdds/conf/DatanodeRatisGrpcConfig.java | 17 + .../hdds/protocol/SCMSecurityProtocol.java | 1 + ...ecurityProtocolClientSideTranslatorPB.java | 1 + ...ocationProtocolClientSideTranslatorPB.java | 2 +- .../hdds/security/token/TokenVerifier.java | 3 +- .../authority/DefaultApprover.java | 2 +- .../authority/DefaultCAServer.java | 2 + .../client/DefaultCertificateClient.java | 57 +- .../hadoop/hdds/server/http/HttpServer2.java | 4 +- .../hdds/server/http/ProfileServlet.java | 8 +- .../hdds/server/http/PrometheusServlet.java | 3 + .../org/apache/hadoop/hdds/utils/HAUtils.java | 116 +- .../hadoop/hdds/utils/HddsServerUtil.java | 4 +- .../hadoop/hdds/utils/TransactionInfo.java | 11 +- .../apache/hadoop/hdds/utils/db/DBStore.java | 2 +- .../hdds/utils/db/RDBCheckpointUtils.java | 2 +- .../hadoop/hdds/utils/db/RocksDatabase.java | 2 +- .../apache/hadoop/hdds/utils/db/Table.java | 30 +- .../hadoop/ozone/audit/package-info.java | 2 +- .../main/resources/webapps/static/ozone.css | 4 - .../webapps/static/templates/jvm.html | 2 +- .../client/CertificateClientTestImpl.java | 22 +- .../src/main/proto/hdds.proto | 1 - hadoop-hdds/pom.xml | 24 - .../hdds/utils/NativeLibraryLoader.java | 13 +- .../hadoop/hdds/scm/PlacementPolicy.java | 6 +- .../hdds/scm/SCMCommonPlacementPolicy.java | 14 +- .../hdds/scm/block/DeletedBlockLog.java | 2 +- .../hdds/scm/container/ContainerManager.java | 9 +- 
.../balancer/ContainerBalancerTask.java | 17 +- .../placement/metrics/SCMNodeMetric.java | 3 +- .../replication/ContainerHealthResult.java | 1 + .../ContainerReplicaPendingOps.java | 8 +- .../RatisContainerReplicaCount.java | 6 +- .../replication/health/HealthCheck.java | 1 + .../container/states/ContainerAttribute.java | 8 +- .../hadoop/hdds/scm/ha/HASecurityUtils.java | 3 +- .../hadoop/hdds/scm/ha/SCMHAManager.java | 1 + .../hadoop/hdds/scm/node/DatanodeInfo.java | 2 +- .../hdds/scm/node/DatanodeUsageInfo.java | 11 - .../hadoop/hdds/scm/node/NodeManager.java | 2 + .../hdds/scm/node/NodeManagerMXBean.java | 2 +- .../hdds/scm/node/NodeStateManager.java | 4 +- .../hadoop/hdds/scm/node/SCMNodeManager.java | 29 +- .../scm/pipeline/PipelineStateManager.java | 1 + .../scm/security/RootCARotationManager.java | 8 +- .../SCMDatanodeHeartbeatDispatcher.java | 4 +- .../scm/server/SCMDatanodeProtocolServer.java | 1 + .../hdds/scm/server/SCMHTTPServerConfig.java | 7 +- .../hadoop/hdds/scm/server/SCMMXBean.java | 3 +- .../scm/server/SCMSecurityProtocolServer.java | 1 + .../scm/server/StorageContainerManager.java | 60 +- .../resources/webapps/scm/scm-overview.html | 147 +- .../src/main/resources/webapps/scm/scm.js | 75 - .../scm/container/balancer/MockedSCM.java | 2 +- .../TestContainerBalancerStatusInfo.java | 28 - .../apache/ozone/test/GenericTestUtils.java | 12 +- .../apache/ozone/test/LambdaTestUtils.java | 4 - .../scm/cli/ContainerOperationClient.java | 2 +- .../scm/cli/datanode/UsageInfoSubcommand.java | 9 - .../cli/pipeline/ClosePipelineSubcommand.java | 18 +- .../cli/datanode/TestUsageInfoSubcommand.java | 3 - .../TestClosePipelinesSubCommand.java | 178 - .../hadoop/ozone/client/ObjectStore.java | 2 +- .../ozone/client/OzoneClientFactory.java | 2 +- .../client/io/KeyOutputStreamSemaphore.java | 2 +- .../ozone/client/protocol/ClientProtocol.java | 34 + .../hadoop/ozone/client/rpc/RpcClient.java | 64 +- ...tBlockOutputStreamIncrementalPutBlock.java | 2 - .../java/org/apache/hadoop/ozone/OFSPath.java | 2 +- .../java/org/apache/hadoop/ozone/OmUtils.java | 16 +- .../ozone/client/io/SelectorOutputStream.java | 2 +- .../apache/hadoop/ozone/om/OMConfigKeys.java | 5 - .../hadoop/ozone/om/S3SecretManager.java | 2 +- .../exceptions/OMLeaderNotReadyException.java | 2 +- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 5 +- .../hadoop/ozone/om/helpers/OzoneFSUtils.java | 33 - .../om/helpers/OzoneIdentityProvider.java | 3 +- .../ozone/om/helpers/RepeatedOmKeyInfo.java | 6 +- .../hadoop/ozone/om/helpers/ServiceInfo.java | 46 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 59 +- .../om/multitenant/AccountNameSpace.java | 4 +- .../ozone/om/multitenant/BucketNameSpace.java | 4 +- .../om/protocol/OzoneManagerProtocol.java | 50 +- ...ManagerProtocolClientSideTranslatorPB.java | 123 +- .../hadoop/ozone/protocolPB/OMPBHelper.java | 12 +- .../security/acl/OzoneAccessAuthorizer.java | 9 +- .../ozone/security/acl/OzoneObjInfo.java | 2 +- .../ozone/om/helpers/TestOzoneFsUtils.java | 30 - .../dist/src/main/assemblies/ozone-src.xml | 7 - .../dashboards/Ozone - ReadKey Metrics.json | 3180 +++++++---------- .../dist/src/main/compose/common/s3a-test.sh | 3 +- .../main/compose/compatibility/docker-config | 1 - .../main/compose/ozonesecure-ha/docker-config | 1 - .../dist/src/main/compose/test-all.sh | 2 +- .../compose/upgrade/compose/ha/docker-config | 1 - .../upgrade/compose/non-ha/docker-config | 1 - .../upgrade/compose/om-ha/docker-config | 1 - .../dist/src/main/compose/upgrade/testlib.sh | 4 +- 
.../callbacks/1.5.0/callback.sh | 31 - .../dist/src/main/compose/xcompat/.env | 2 - .../src/main/compose/xcompat/clients.yaml | 22 +- .../src/main/compose/xcompat/docker-config | 94 +- .../dist/src/main/compose/xcompat/krb5.conf | 41 - .../src/main/compose/xcompat/new-cluster.yaml | 33 +- .../src/main/compose/xcompat/old-cluster.yaml | 33 +- .../dist/src/main/compose/xcompat/test.sh | 29 +- .../dist/src/main/license/bin/LICENSE.txt | 1 - .../dist/src/main/license/jar-report.txt | 1 - .../src/main/smoketest/compatibility/om.robot | 5 - .../main/smoketest/compatibility/read.robot | 4 - .../smoketest/debug/ozone-debug-ldb.robot | 93 - .../debug/ozone-debug-lease-recovery.robot | 22 +- .../dist/src/main/smoketest/freon/hsync.robot | 51 - .../smoketest/hsync/upgrade-hsync-check.robot | 68 - .../src/main/smoketest/ozone-lib/freon.robot | 6 - .../smoketest/ozone-lib/shell_tests.robot | 8 - .../main/smoketest/s3/MultipartUpload.robot | 5 - hadoop-ozone/dist/src/shell/ozone/ozone | 5 - .../fs/ozone/AbstractOzoneFileSystemTest.java | 2 - .../AbstractRootedOzoneFileSystemTest.java | 2 - .../TestDirectoryDeletingServiceWithFSO.java | 140 - .../org/apache/hadoop/fs/ozone/TestHSync.java | 30 - .../hadoop/fs/ozone/TestHSyncUpgrade.java | 2 - .../hadoop/fs/ozone/TestLeaseRecovery.java | 2 - .../contract/AbstractOzoneContractTest.java | 3 - .../pipeline/TestPipelineManagerMXBean.java | 6 +- .../scm/storage/TestContainerCommandsEC.java | 104 +- .../ozone/TestOzoneConfigurationFields.java | 1 - .../rpc/AbstractTestECKeyOutputStream.java | 2 - .../client/rpc/TestBlockOutputStream.java | 6 +- .../TestBlockOutputStreamWithFailures.java | 18 +- .../client/rpc/TestSecureOzoneRpcClient.java | 2 - .../apache/hadoop/ozone/debug/TestLDBCli.java | 12 - .../ozone/debug/TestLeaseRecoverer.java | 2 - .../apache/hadoop/ozone/om/TestOmMetrics.java | 19 +- .../om/service/TestRangerBGSyncService.java | 3 +- .../ozone/om/snapshot/TestOmSnapshot.java | 12 +- .../snapshot/TestOzoneManagerHASnapshot.java | 104 - ....java => TestSnapshotDeletingService.java} | 383 +- .../TestSnapshotDirectoryCleaningService.java | 2 - .../hadoop/ozone/recon/TestReconTasks.java | 44 +- .../ozone/shell/TestOzoneRepairShell.java | 30 - .../hadoop/ozone/shell/TestOzoneShellHA.java | 5 - .../ozone/shell/TestOzoneShellHAWithFSO.java | 2 - .../src/test/resources/ozone-site.xml | 2 +- .../src/main/proto/OmClientProtocol.proto | 86 +- .../hadoop/ozone/om/OMMetadataManager.java | 48 +- .../hadoop/ozone/om/BucketManagerImpl.java | 2 +- .../ozone/om/GrpcOzoneManagerServer.java | 2 +- .../apache/hadoop/ozone/om/KeyManager.java | 68 +- .../hadoop/ozone/om/KeyManagerImpl.java | 107 +- .../apache/hadoop/ozone/om/ListIterator.java | 2 +- .../org/apache/hadoop/ozone/om/OMMetrics.java | 21 + .../hadoop/ozone/om/OMMultiTenantManager.java | 2 +- .../ozone/om/OMMultiTenantManagerImpl.java | 1 + .../ozone/om/OmMetadataManagerImpl.java | 161 +- .../hadoop/ozone/om/OmSnapshotManager.java | 74 +- .../apache/hadoop/ozone/om/OzoneManager.java | 85 +- .../hadoop/ozone/om/OzoneManagerUtils.java | 12 +- .../hadoop/ozone/om/SnapshotChainManager.java | 62 +- .../hadoop/ozone/om/TrashOzoneFileSystem.java | 31 +- .../ozone/om/multitenant/AuthorizerLock.java | 2 +- .../om/ratis/OzoneManagerDoubleBuffer.java | 34 +- .../om/ratis/OzoneManagerRatisServer.java | 18 +- .../ratis/utils/OzoneManagerRatisUtils.java | 19 +- .../ozone/om/request/OMClientRequest.java | 1 + .../ozone/om/request/RequestAuditor.java | 1 + .../file/OMDirectoryCreateRequest.java | 2 +- 
.../ozone/om/request/file/OMFileRequest.java | 2 +- .../key/OMDirectoriesPurgeRequestWithFSO.java | 50 +- .../om/request/key/OMKeyCommitRequest.java | 13 +- .../om/request/key/OMKeyCreateRequest.java | 5 - .../key/OMKeyCreateRequestWithFSO.java | 2 +- .../om/request/key/OMKeyPurgeRequest.java | 74 +- .../key/OMKeyRenameRequestWithFSO.java | 4 +- .../ozone/om/request/key/OMKeyRequest.java | 6 +- .../snapshot/OMSnapshotCreateRequest.java | 3 +- .../OMSnapshotMoveDeletedKeysRequest.java | 32 +- .../OMSnapshotMoveTableKeysRequest.java | 184 - .../snapshot/OMSnapshotPurgeRequest.java | 96 +- .../om/request/volume/OMVolumeRequest.java | 1 + .../OMDirectoriesPurgeResponseWithFSO.java | 13 +- .../om/response/key/OMKeyPurgeResponse.java | 15 +- .../OMSnapshotMoveDeletedKeysResponse.java | 50 +- .../OMSnapshotMoveTableKeysResponse.java | 162 - .../snapshot/OMSnapshotPurgeResponse.java | 15 +- .../service/AbstractKeyDeletingService.java | 85 +- .../om/service/DirectoryDeletingService.java | 64 +- .../ozone/om/service/KeyDeletingService.java | 89 +- .../MultipartUploadCleanupService.java | 24 +- .../om/service/OMRangerBGSyncService.java | 21 +- .../om/service/OpenKeyCleanupService.java | 24 +- .../ozone/om/service/QuotaRepairTask.java | 106 +- .../om/service/SnapshotDeletingService.java | 550 ++- .../SnapshotDirectoryCleaningService.java | 36 +- .../ozone/om/snapshot/OmSnapshotUtils.java | 2 +- .../ozone/om/snapshot/SnapshotUtils.java | 153 +- .../ozone/om/upgrade/OMLayoutFeature.java | 1 - .../OzoneManagerRequestHandler.java | 55 +- .../ozone/protocolPB/RequestHandler.java | 2 +- .../hadoop/ozone/om/TestTrashService.java | 137 + .../ratis/TestOzoneManagerDoubleBuffer.java | 63 +- .../ozone/om/request/OMRequestTestUtils.java | 81 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 91 +- .../request/key/TestOMKeyCommitRequest.java | 20 +- .../key/TestOMKeyPurgeRequestAndResponse.java | 64 +- .../om/request/key/TestOMKeyRequest.java | 40 +- .../snapshot/TestOMSnapshotCreateRequest.java | 176 +- .../snapshot/TestOMSnapshotDeleteRequest.java | 157 +- .../TestOMSnapshotMoveTableKeysRequest.java | 264 -- ...TestOMSnapshotPurgeRequestAndResponse.java | 225 +- .../snapshot/TestOMSnapshotRenameRequest.java | 174 +- ...SnapshotSetPropertyRequestAndResponse.java | 86 +- .../TestOMSnapshotCreateResponse.java | 4 +- .../TestOMSnapshotMoveTableKeysResponse.java | 199 -- .../om/service/TestKeyDeletingService.java | 162 +- .../om/service/TestOpenKeyCleanupService.java | 2 - .../service/TestSnapshotDeletingService.java | 38 +- .../ozone/om/snapshot/TestSnapshotChain.java | 32 - .../om/snapshot/TestSnapshotDiffManager.java | 1 + .../ozone/om/snapshot/TestSnapshotInfo.java | 47 - .../TestSnapshotRequestAndResponse.java | 255 -- .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 5 +- .../BasicRootedOzoneClientAdapterImpl.java | 123 +- .../fs/ozone/BasicRootedOzoneFileSystem.java | 5 +- .../hadoop/fs/ozone/FileStatusAdapter.java | 2 +- .../hadoop/fs/ozone/OzoneFSInputStream.java | 1 + hadoop-ozone/pom.xml | 23 - .../schema/ContainerSchemaDefinition.java | 33 +- .../apache/hadoop/ozone/recon/ReconUtils.java | 4 +- .../recon/api/AccessHeatMapEndpoint.java | 5 +- .../ozone/recon/api/ContainerEndpoint.java | 25 +- .../ozone/recon/api/OMDBInsightEndpoint.java | 5 +- .../recon/api/OMDBInsightSearchEndpoint.java | 6 +- .../recon/api/TriggerDBSyncEndpoint.java | 1 - .../recon/api/handlers/BucketHandler.java | 2 +- .../types/UnhealthyContainersResponse.java | 13 + .../ozone/recon/fsck/ContainerHealthTask.java | 153 +- 
.../ContainerHealthSchemaManager.java | 4 +- .../recovery/ReconOMMetadataManager.java | 2 +- .../ReconStorageContainerManagerFacade.java | 9 +- .../spi/ReconContainerMetadataManager.java | 16 +- .../ReconContainerMetadataManagerImpl.java | 12 +- .../recon/tasks/ContainerKeyMapperTask.java | 6 +- .../recon/tasks/OmUpdateEventValidator.java | 2 + .../hadoop/ozone/recon/tasks/ReconOmTask.java | 4 +- .../recon/tasks/ReconTaskController.java | 2 +- .../webapps/recon/ozone-recon-web/api/db.json | 2 +- .../recon/ozone-recon-web/package.json | 7 +- .../recon/ozone-recon-web/pnpm-lock.yaml | 1258 ++++--- .../webapps/recon/ozone-recon-web/src/app.tsx | 7 +- .../autoReloadPanel/autoReloadPanel.tsx | 3 +- .../src/components/navBar/navBar.less | 2 +- .../src/utils/axiosRequestHelper.tsx | 6 +- .../decommissioningSummary.tsx | 139 - .../duBreadcrumbNav/duBreadcrumbNav.tsx | 173 - .../v2/components/duMetadata/duMetadata.tsx | 389 -- .../v2/components/duPieChart/duPieChart.tsx | 211 -- .../src/v2/components/eChart/eChart.tsx | 15 +- .../src/v2/components/navBar/navBar.less | 65 - .../src/v2/components/navBar/navBar.tsx | 180 - .../src/v2/components/search/search.tsx | 6 - .../src/v2/components/select/multiSelect.tsx | 56 +- .../src/v2/components/select/singleSelect.tsx | 4 +- .../v2/components/storageBar/storageBar.less | 45 - .../v2/components/storageBar/storageBar.tsx | 49 +- .../src/v2/components/tables/bucketsTable.tsx | 267 -- .../v2/components/tables/datanodesTable.tsx | 314 -- .../v2/components/tables/pipelinesTable.tsx | 211 -- .../src/v2/components/tables/volumesTable.tsx | 179 - .../src/v2/pages/buckets/buckets.less | 41 - .../src/v2/pages/buckets/buckets.tsx | 345 -- .../src/v2/pages/datanodes/datanodes.less | 52 - .../src/v2/pages/datanodes/datanodes.tsx | 309 -- .../src/v2/pages/diskUsage/diskUsage.less | 59 - .../src/v2/pages/diskUsage/diskUsage.tsx | 144 - .../src/v2/pages/overview/overview.tsx | 22 +- .../src/v2/pages/pipelines/pipelines.less | 48 - .../src/v2/pages/pipelines/pipelines.tsx | 160 - .../src/v2/pages/volumes/volumes.tsx | 169 +- .../ozone-recon-web/src/v2/routes-v2.tsx | 20 - .../src/v2/types/bucket.types.ts | 28 +- .../src/v2/types/datanode.types.ts | 167 - .../src/v2/types/diskUsage.types.ts | 41 - .../src/v2/types/pipelines.types.ts | 62 - .../src/v2/types/volume.types.ts | 10 +- .../src/v2/utils/momentUtils.ts | 63 - .../src/views/insights/insights.tsx | 8 +- .../src/views/insights/om/om.tsx | 10 +- .../recon/ozone-recon-web/vite.config.ts | 9 +- .../recon/api/TestContainerEndpoint.java | 34 +- .../recon/api/filters/TestAdminFilter.java | 12 +- .../recon/fsck/TestContainerHealthTask.java | 283 +- ...estContainerHealthTaskRecordGenerator.java | 62 +- .../hadoop/ozone/s3/AuthorizationFilter.java | 9 +- .../hadoop/ozone/s3/OzoneClientCache.java | 2 + .../ozone/s3/endpoint/BucketEndpoint.java | 4 - ...eteMultipartUploadRequestUnmarshaller.java | 11 +- .../MultiDeleteRequestUnmarshaller.java | 6 +- .../PutBucketAclRequestUnmarshaller.java | 4 +- .../ozone/s3/exception/OS3Exception.java | 5 - .../s3/signature/AWSSignatureProcessor.java | 2 +- .../hadoop/ozone/s3/signature/Credential.java | 3 +- .../apache/hadoop/ozone/s3/util/S3Utils.java | 9 - .../ozone/client/ClientProtocolStub.java | 16 + hadoop-ozone/tools/pom.xml | 4 - .../ozone/admin/nssummary/NSSummaryAdmin.java | 1 + .../admin/om/CancelPrepareSubCommand.java | 2 +- .../apache/hadoop/ozone/debug/DBScanner.java | 269 +- .../hadoop/ozone/debug/ValueSchema.java | 17 +- .../ozone/freon/BaseFreonGenerator.java | 13 - 
.../hadoop/ozone/freon/ContentGenerator.java | 15 +- .../ozone/freon/DNRPCLoadGenerator.java | 10 +- .../ozone/freon/HadoopDirTreeGenerator.java | 2 +- .../hadoop/ozone/freon/OmBucketGenerator.java | 5 - .../ozone/freon/RandomKeyGenerator.java | 18 +- .../hadoop/ozone/freon/S3BucketGenerator.java | 6 +- .../hadoop/ozone/fsck/ContainerMapper.java | 4 +- .../ozone/repair/RecoverSCMCertificate.java | 261 -- .../ozone/repair/quota/QuotaRepair.java | 121 - .../ozone/repair/quota/QuotaStatus.java | 80 - .../ozone/repair/quota/QuotaTrigger.java | 100 - .../ozone/repair/quota/package-info.java | 22 - .../apache/hadoop/ozone/shell/OzoneRatis.java | 59 - .../hadoop/ozone/shell/StoreTypeOption.java | 2 +- .../ozone/shell/checknative/CheckNative.java | 22 +- .../ozone/shell/keys/PutKeyHandler.java | 13 +- .../shell/volume/DeleteVolumeHandler.java | 6 +- .../org/apache/hadoop/ozone/utils/Filter.java | 107 - .../ozone/checknative/TestCheckNative.java | 18 - .../hadoop/ozone/shell/TestOzoneRatis.java | 172 - pom.xml | 67 +- 442 files changed, 5905 insertions(+), 15005 deletions(-) delete mode 100644 dev-support/pom.xml rename hadoop-hdds/{common => client}/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java (100%) delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java delete mode 100644 hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf delete mode 100644 hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/{TestSnapshotDeletingServiceIntegrationTest.java => TestSnapshotDeletingService.java} (52%) delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/decommissioningSummary/decommissioningSummary.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duBreadcrumbNav/duBreadcrumbNav.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx delete mode 100644 
hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42ef94a0b36..bd0a12edd93 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -218,7 +218,7 @@ jobs: distribution: 'temurin' java-version: ${{ matrix.java }} - 
name: Compile Ozone using Java ${{ matrix.java }} - run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} + run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 20f1c034c58..ac1c913fd50 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,7 +24,7 @@ com.gradle develocity-maven-extension - 1.22.1 + 1.21.5 com.gradle diff --git a/NOTICE.txt b/NOTICE.txt index cc4e3c58b39..7a1e855f6a3 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Apache Ozone -Copyright 2024 The Apache Software Foundation +Copyright 2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 12a7987ffb4..a95a981bdd3 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -429,15 +429,3 @@ load bats-assert/load.bash assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } - -@test "properties file in resources" { - run dev-support/ci/selective_ci_checks.sh 71b8bdd8becf72d6f7d4e7986895504b8259b3e5 - - assert_output -p 'basic-checks=["rat","checkstyle","native"]' - assert_output -p needs-build=false - assert_output -p needs-compile=false - assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false - assert_output -p needs-integration-tests=true - assert_output -p needs-kubernetes-tests=false -} diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index f6b06326a32..e512b4a5d62 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -373,7 +373,6 @@ function check_needs_checkstyle() { "^hadoop-hdds/dev-support/checkstyle" "pom.xml" "src/..../java" - "src/..../resources/.*\.properties" ) local ignore_array=( "^hadoop-ozone/dist" diff --git a/dev-support/pom.xml b/dev-support/pom.xml deleted file mode 100644 index e11e3b32ee4..00000000000 --- a/dev-support/pom.xml +++ /dev/null @@ -1,98 +0,0 @@ - - - - - ozone-main - org.apache.ozone - 1.5.0-SNAPSHOT - - 4.0.0 - ozone-dev-support - Helper module for sharing resources among projects - Apache Ozone Dev Support - - - false - - - - - ${project.build.directory}/extra-resources - META-INF - - LICENSE.txt - NOTICE.txt - - - - - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - org.apache.maven.plugins - maven-resources-plugin - - - copy-resources - validate - - copy-resources - - - ${project.build.directory}/extra-resources - - - ../ - - LICENSE.txt - NOTICE.txt - - - - - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - process-resources - - bundle - - - - - ${project.build.outputDirectory} - - META-INF/LICENSE.txt - META-INF/NOTICE.txt - - - - - - diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 5426bbc4981..873f6f67348 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -247,49 +247,28 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED"; - // ozone.client.hbase.enhancements.allowed - @Config(key = "hbase.enhancements.allowed", - defaultValue = "false", - description = "When set to false, client-side HBase enhancement-related Ozone (experimental) features " + - "are disabled (not allowed to be enabled) regardless of whether those configs are set.\n" + - "\n" + - "Here is the list of configs and values overridden when this config is set to false:\n" + - "1. ozone.fs.hsync.enabled = false\n" + - "2. ozone.client.incremental.chunk.list = false\n" + - "3. ozone.client.stream.putblock.piggybacking = false\n" + - "4. ozone.client.key.write.concurrency = 1\n" + - "\n" + - "A warning message will be printed if any of the above configs are overridden by this.", - tags = ConfigTag.CLIENT) - private boolean hbaseEnhancementsAllowed = false; - - // ozone.client.incremental.chunk.list @Config(key = "incremental.chunk.list", - defaultValue = "false", + defaultValue = "true", type = ConfigType.BOOLEAN, description = "Client PutBlock request can choose incremental chunk " + "list rather than full chunk list to optimize performance. " + - "Critical to HBase. EC does not support this feature. " + - "Can be enabled only when ozone.client.hbase.enhancements.allowed = true", + "Critical to HBase. EC does not support this feature.", tags = ConfigTag.CLIENT) - private boolean incrementalChunkList = false; + private boolean incrementalChunkList = true; - // ozone.client.stream.putblock.piggybacking @Config(key = "stream.putblock.piggybacking", - defaultValue = "false", + defaultValue = "true", type = ConfigType.BOOLEAN, - description = "Allow PutBlock to be piggybacked in WriteChunk requests if the chunk is small. " + - "Can be enabled only when ozone.client.hbase.enhancements.allowed = true", + description = "Allow PutBlock to be piggybacked in WriteChunk " + + "requests if the chunk is small.", tags = ConfigTag.CLIENT) - private boolean enablePutblockPiggybacking = false; + private boolean enablePutblockPiggybacking = true; - // ozone.client.key.write.concurrency @Config(key = "key.write.concurrency", defaultValue = "1", description = "Maximum concurrent writes allowed on each key. " + "Defaults to 1 which matches the behavior before HDDS-9844. " + - "For unlimited write concurrency, set this to -1 or any negative integer value. " + - "Any value other than 1 is effective only when ozone.client.hbase.enhancements.allowed = true", + "For unlimited write concurrency, set this to -1 or any negative integer value.", tags = ConfigTag.CLIENT) private int maxConcurrentWritePerKey = 1; @@ -319,34 +298,6 @@ public void validate() { OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; } - // Verify client configs related to HBase enhancements - // Enforce check on ozone.client.hbase.enhancements.allowed - if (!hbaseEnhancementsAllowed) { - // ozone.client.hbase.enhancements.allowed = false - if (incrementalChunkList) { - LOG.warn("Ignoring ozone.client.incremental.chunk.list = true " + - "because HBase enhancements are disallowed. 
" + - "To enable it, set ozone.client.hbase.enhancements.allowed = true."); - incrementalChunkList = false; - LOG.debug("Final ozone.client.incremental.chunk.list = {}", incrementalChunkList); - } - if (enablePutblockPiggybacking) { - LOG.warn("Ignoring ozone.client.stream.putblock.piggybacking = true " + - "because HBase enhancements are disallowed. " + - "To enable it, set ozone.client.hbase.enhancements.allowed = true."); - enablePutblockPiggybacking = false; - LOG.debug("Final ozone.client.stream.putblock.piggybacking = {}", enablePutblockPiggybacking); - } - if (maxConcurrentWritePerKey != 1) { - LOG.warn("Ignoring ozone.client.key.write.concurrency = {} " + - "because HBase enhancements are disallowed. " + - "To enable it, set ozone.client.hbase.enhancements.allowed = true.", - maxConcurrentWritePerKey); - maxConcurrentWritePerKey = 1; - LOG.debug("Final ozone.client.key.write.concurrency = {}", maxConcurrentWritePerKey); - } - // Note: ozone.fs.hsync.enabled is enforced by OzoneFSUtils#canEnableHsync, not here - } } public long getStreamBufferFlushSize() { @@ -535,14 +486,6 @@ public void setDatastreamPipelineMode(boolean datastreamPipelineMode) { this.datastreamPipelineMode = datastreamPipelineMode; } - public void setHBaseEnhancementsAllowed(boolean isHBaseEnhancementsEnabled) { - this.hbaseEnhancementsAllowed = isHBaseEnhancementsEnabled; - } - - public boolean getHBaseEnhancementsAllowed() { - return this.hbaseEnhancementsAllowed; - } - public void setIncrementalChunkList(boolean enable) { this.incrementalChunkList = enable; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java similarity index 100% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java rename to hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 7641de1274d..61bc73420e6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -124,6 +124,7 @@ XceiverClientReply watchOnLastIndex() throws IOException { * * @param commitIndex log index to watch for * @return minimum commit index replicated to all nodes + * @throws IOException IOException in case watch gets timed out */ CompletableFuture watchForCommitAsync(long commitIndex) { final MemoizedSupplier> supplier diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 48c77f2c863..d5423d4ec0b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -364,6 +364,7 @@ public void writeOnRetry(long len) throws IOException { * it is a no op. 
* @param bufferFull flag indicating whether bufferFull condition is hit or * its called as part flush/close + * @return minimum commit index replicated to all nodes * @throws IOException IOException in case watch gets timed out */ public void watchForCommit(boolean bufferFull) throws IOException { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 59795dd0f05..e88b097c499 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -198,7 +198,9 @@ public BlockOutputStream( blkIDBuilder.build()).addMetadata(keyValue); this.pipeline = pipeline; // tell DataNode I will send incremental chunk list - this.supportIncrementalChunkList = canEnableIncrementalChunkList(); + // EC does not support incremental chunk list. + this.supportIncrementalChunkList = config.getIncrementalChunkList() && + this instanceof RatisBlockOutputStream && allDataNodesSupportPiggybacking(); LOG.debug("incrementalChunkList is {}", supportIncrementalChunkList); if (supportIncrementalChunkList) { this.containerBlockData.addMetadata(INCREMENTAL_CHUNK_LIST_KV); @@ -235,51 +237,11 @@ public BlockOutputStream( config.getBytesPerChecksum()); this.clientMetrics = clientMetrics; this.streamBufferArgs = streamBufferArgs; - this.allowPutBlockPiggybacking = canEnablePutblockPiggybacking(); + this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() && + allDataNodesSupportPiggybacking(); LOG.debug("PutBlock piggybacking is {}", allowPutBlockPiggybacking); } - /** - * Helper method to check if incremental chunk list can be enabled. - * Prints debug messages if it cannot be enabled. - */ - private boolean canEnableIncrementalChunkList() { - boolean confEnableIncrementalChunkList = config.getIncrementalChunkList(); - if (!confEnableIncrementalChunkList) { - return false; - } - - if (!(this instanceof RatisBlockOutputStream)) { - // Note: EC does not support incremental chunk list - LOG.debug("Unable to enable incrementalChunkList because BlockOutputStream is not a RatisBlockOutputStream"); - return false; - } - if (!allDataNodesSupportPiggybacking()) { - // Not all datanodes support piggybacking and incremental chunk list. - LOG.debug("Unable to enable incrementalChunkList because not all datanodes support piggybacking"); - return false; - } - return confEnableIncrementalChunkList; - } - - /** - * Helper method to check if PutBlock piggybacking can be enabled. - * Prints debug message if it cannot be enabled. - */ - private boolean canEnablePutblockPiggybacking() { - boolean confEnablePutblockPiggybacking = config.getEnablePutblockPiggybacking(); - if (!confEnablePutblockPiggybacking) { - return false; - } - - if (!allDataNodesSupportPiggybacking()) { - // Not all datanodes support piggybacking and incremental chunk list. - LOG.debug("Unable to enable PutBlock piggybacking because not all datanodes support piggybacking"); - return false; - } - return confEnablePutblockPiggybacking; - } - private boolean allDataNodesSupportPiggybacking() { // return true only if all DataNodes in the pipeline are on a version // that supports PutBlock piggybacking. 
@@ -413,8 +375,10 @@ private void doFlushOrWatchIfNeeded() throws IOException { } private void recordWatchForCommitAsync(CompletableFuture putBlockResultFuture) { - final CompletableFuture flushFuture = putBlockResultFuture.thenCompose(x -> watchForCommit(x.commitIndex)); + recordFlushFuture(watchForCommitAsync(putBlockResultFuture)); + } + private void recordFlushFuture(CompletableFuture flushFuture) { Preconditions.checkState(Thread.holdsLock(this)); this.lastFlushFuture = flushFuture; this.allPendingFlushFutures = allPendingFlushFutures.thenCombine(flushFuture, (last, curr) -> null); @@ -480,8 +444,7 @@ public synchronized void writeOnRetry(long len) throws IOException { writeChunk(buffer); putBlockFuture = executePutBlock(false, false); } - CompletableFuture watchForCommitAsync = - putBlockFuture.thenCompose(x -> watchForCommit(x.commitIndex)); + CompletableFuture watchForCommitAsync = watchForCommitAsync(putBlockFuture); try { watchForCommitAsync.get(); } catch (InterruptedException e) { @@ -514,44 +477,33 @@ void releaseBuffersOnException() { } /** - * Send a watch request to wait until the given index became committed. - * When watch is not needed (e.g. EC), this is a NOOP. - * - * @param index the log index to wait for. - * @return the future of the reply. + * Watch for a specific commit index. */ - CompletableFuture sendWatchForCommit(long index) { - return CompletableFuture.completedFuture(null); + XceiverClientReply sendWatchForCommit(long commitIndex) + throws IOException { + return null; } - private CompletableFuture watchForCommit(long commitIndex) { + private void watchForCommit(long commitIndex) throws IOException { + checkOpen(); try { - checkOpen(); - } catch (IOException e) { - throw new FlushRuntimeException(e); - } - - LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); - return sendWatchForCommit(commitIndex) - .thenAccept(this::checkReply) - .exceptionally(e -> { - throw new FlushRuntimeException(setIoException(e)); - }) - .whenComplete((r, e) -> LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex)); - } - - private void checkReply(XceiverClientReply reply) { - if (reply == null) { - return; - } - final List dnList = reply.getDatanodes(); - if (dnList.isEmpty()) { - return; + LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); + final XceiverClientReply reply = sendWatchForCommit(commitIndex); + if (reply != null) { + List dnList = reply.getDatanodes(); + if (!dnList.isEmpty()) { + Pipeline pipe = xceiverClient.getPipeline(); + + LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}", + blockID, pipe, dnList); + failedServers.addAll(dnList); + } + } + } catch (IOException ioe) { + setIoException(ioe); + throw getIoException(); } - - LOG.warn("Failed to commit BlockId {} on {}. 
Failed nodes: {}", - blockID, xceiverClient.getPipeline(), dnList); - failedServers.addAll(dnList); + LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex); } void updateCommitInfo(XceiverClientReply reply, List buffers) { @@ -771,6 +723,16 @@ private synchronized CompletableFuture handleFlushInternalSynchronized(boo return lastFlushFuture; } + private CompletableFuture watchForCommitAsync(CompletableFuture putBlockResultFuture) { + return putBlockResultFuture.thenAccept(x -> { + try { + watchForCommit(x.commitIndex); + } catch (IOException e) { + throw new FlushRuntimeException(e); + } + }); + } + @Override public void close() throws IOException { if (xceiverClientFactory != null && xceiverClient != null) { @@ -809,7 +771,7 @@ void validateResponse( } - public IOException setIoException(Throwable e) { + public void setIoException(Exception e) { IOException ioe = getIoException(); if (ioe == null) { IOException exception = new IOException(EXCEPTION_MSG + e.toString(), e); @@ -820,7 +782,6 @@ public IOException setIoException(Throwable e) { "so subsequent request also encounters " + "Storage Container Exception {}", ioe, e); } - return getIoException(); } void cleanup() { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 7776e245be0..12ca9978c68 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -38,13 +38,9 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.ArrayList; -import java.util.Comparator; import java.util.List; import java.util.Objects; -import java.util.Optional; -import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; @@ -146,34 +142,8 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } if (checksumBlockData != null) { - - // For the same BlockGroupLength, we need to find the larger value of Block DataSize. - // This is because we do not send empty chunks to the DataNode, so the larger value is more accurate. - Map> maxDataSizeByGroup = Arrays.stream(blockData) - .filter(Objects::nonNull) - .collect(Collectors.groupingBy(BlockData::getBlockGroupLength, - Collectors.maxBy(Comparator.comparingLong(BlockData::getSize)))); - BlockData maxBlockData = maxDataSizeByGroup.get(blockGroupLength).get(); - - // When calculating the checksum size, - // We need to consider both blockGroupLength and the actual size of blockData. - // - // We use the smaller value to determine the size of the ChunkList. - // - // 1. In most cases, blockGroupLength is equal to the size of blockData. - // 2. Occasionally, blockData is not fully filled; if a chunk is empty, - // it is not sent to the DN, resulting in blockData size being smaller than blockGroupLength. - // 3. In cases with 'dirty data', - // if an error occurs when writing to the EC-Stripe (e.g., DN reports Container Closed), - // and the length confirmed with OM is smaller, blockGroupLength may be smaller than blockData size. 
- long blockDataSize = Math.min(maxBlockData.getSize(), blockGroupLength); - int chunkSize = (int) Math.ceil(((double) blockDataSize / repConfig.getEcChunkSize())); - List checksumBlockDataChunks = checksumBlockData.getChunks(); - if (chunkSize > 0) { - checksumBlockDataChunks = checksumBlockData.getChunks().subList(0, chunkSize); - } - List currentChunks = getContainerBlockData().getChunksList(); + List checksumBlockDataChunks = checksumBlockData.getChunks(); Preconditions.checkArgument( currentChunks.size() == checksumBlockDataChunks.size(), @@ -299,7 +269,7 @@ public CompletableFuture executePutBlock(boolean close, throw ce; }); } catch (IOException | ExecutionException e) { - throw new IOException(EXCEPTION_MSG + e, e); + throw new IOException(EXCEPTION_MSG + e.toString(), e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index e3f7f043a9e..3e78abbf485 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -75,6 +75,7 @@ public synchronized int read(ByteBuffer byteBuffer) throws IOException { * readWithStrategy implementation, as it will never be called by the tests. * * @param strategy + * @return * @throws IOException */ protected abstract int readWithStrategy(ByteReaderStrategy strategy) throws diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index 0f95716bf9a..d32c37eba6c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -102,8 +102,8 @@ void releaseBuffersOnException() { } @Override - CompletableFuture sendWatchForCommit(long index) { - return commitWatcher.watchForCommitAsync(index); + XceiverClientReply sendWatchForCommit(long commitIndex) throws IOException { + return commitWatcher.watchForCommit(commitIndex); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index 8287a5a78bb..d347dee8512 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -43,6 +43,7 @@ public interface BlockInputStreamFactory { * @param blockInfo The blockInfo representing the block. * @param pipeline The pipeline to be used for reading the block * @param token The block Access Token + * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index d9cadc948a6..8a87234a770 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -71,6 +71,7 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, * @param blockInfo The blockInfo representing the block. * @param pipeline The pipeline to be used for reading the block * @param token The block Access Token + * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index 83abb937b03..6342de2c338 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -152,6 +152,7 @@ protected int calculateExpectedDataBlocks(ECReplicationConfig rConfig) { * Using the current position, returns the index of the blockStream we should * be reading from. This is the index in the internal array holding the * stream reference. The block group index will be one greater than this. + * @return */ protected int currentStreamIndex() { return (int)((position / ecChunkSize) % repConfig.getData()); @@ -205,6 +206,7 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) throws IOE * to the replicaIndex given based on the EC pipeline fetched from SCM. * @param replicaIndex * @param refreshFunc + * @return */ protected Function ecPipelineRefreshFunction( int replicaIndex, Function refreshFunc) { @@ -239,6 +241,7 @@ protected Function ecPipelineRefreshFunction( * potentially partial last stripe. Note that the internal block index is * numbered starting from 1. * @param index - Index number of the internal block, starting from 1 + * @return */ protected long internalBlockLength(int index) { long lastStripe = blockInfo.getLength() % stripeSize; @@ -341,6 +344,7 @@ protected boolean shouldRetryFailedRead(int failedIndex) { * strategy buffer. This call may read from several internal BlockInputStreams * if there is sufficient space in the buffer. * @param strategy + * @return * @throws IOException */ @Override @@ -405,6 +409,7 @@ protected void seekStreamIfNecessary(BlockExtendedInputStream stream, * group length. 
* @param stream Stream to read from * @param strategy The ReaderStrategy to read data into + * @return * @throws IOException */ private int readFromStream(BlockExtendedInputStream stream, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index aca3cfed465..66e7a31337a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -45,6 +45,7 @@ public interface ECBlockInputStreamFactory { * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. + * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 41c46aad379..01d0b0a7b7e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -68,6 +68,7 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. + * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 229cc3f3e36..31f94e0acad 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -85,7 +85,7 @@ * Parity elements long. Missing or not needed elements should be set to null * in the array. The elements should be assigned to the array in EC index order. * - * Assuming we have n missing data locations, where n {@literal <=} parity locations, the + * Assuming we have n missing data locations, where n <= parity locations, the * ByteBuffers passed in from the client are either assigned to the decoder * input array, or they are assigned to the decoder output array, where * reconstructed data is written. 
The required number of parity buffers will be diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java index 920d1e19e1b..0dd29cb50a4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -22,8 +22,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; class TestOzoneClientConfig { @@ -38,42 +36,4 @@ void missingSizeSuffix() { assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum()); } - - @Test - void testClientHBaseEnhancementsAllowedTrue() { - // When ozone.client.hbase.enhancements.allowed = true, - // related client configs should be effective as-is. - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); - - // Note: ozone.fs.hsync.enabled is checked by OzoneFSUtils.canEnableHsync(), thus not checked here - conf.setBoolean("ozone.client.incremental.chunk.list", true); - conf.setBoolean("ozone.client.stream.putblock.piggybacking", true); - conf.setInt("ozone.client.key.write.concurrency", -1); - - OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); - - assertTrue(subject.getIncrementalChunkList()); - assertTrue(subject.getEnablePutblockPiggybacking()); - assertEquals(-1, subject.getMaxConcurrentWritePerKey()); - } - - @Test - void testClientHBaseEnhancementsAllowedFalse() { - // When ozone.client.hbase.enhancements.allowed = false, - // related client configs should be reverted back to default. - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", false); - - // Note: ozone.fs.hsync.enabled is checked by OzoneFSUtils.canEnableHsync(), thus not checked here - conf.setBoolean("ozone.client.incremental.chunk.list", true); - conf.setBoolean("ozone.client.stream.putblock.piggybacking", true); - conf.setInt("ozone.client.key.write.concurrency", -1); - - OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); - - assertFalse(subject.getIncrementalChunkList()); - assertFalse(subject.getEnablePutblockPiggybacking()); - assertEquals(1, subject.getMaxConcurrentWritePerKey()); - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index f8c4a5e6fab..ff0cef43c9e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -651,7 +651,7 @@ public static File createDir(String dirPath) { * Utility string formatter method to display SCM roles. 
* * @param nodes - * @return String + * @return */ public static String format(List nodes) { StringBuilder sb = new StringBuilder(); @@ -681,8 +681,7 @@ public static int roundupMb(long bytes) { /** * Unwrap exception to check if it is some kind of access control problem - * ({@link org.apache.hadoop.security.AccessControlException} or - * {@link org.apache.hadoop.security.token.SecretManager.InvalidToken}) + * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) * or a RpcException. */ public static Throwable getUnwrappedException(Exception ex) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java index 804e6552488..63c29ba7c91 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java @@ -30,24 +30,12 @@ public final class JavaUtils { * is equal or greater than the parameter. * * @param version 8, 9, 10 etc. - * @return comparison with system property, always true for any int up to 8 + * @return comparison with system property, always true for 8 */ public static boolean isJavaVersionAtLeast(int version) { return JAVA_SPEC_VER >= version; } - /** - * Query to see if major version of Java specification of the system - * is equal or less than the parameter. - * - * @param version 8, 9, 10 etc. - * @return comparison with system property - */ - public static boolean isJavaVersionAtMost(int version) { - return JAVA_SPEC_VER <= version; - } - - /** * Private constructor. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java index 6e9ee946790..4251344139a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java @@ -27,9 +27,9 @@ * class or method not changing over time. Currently the stability can be * {@link Stable}, {@link Evolving} or {@link Unstable}.
 *
- * <ul><li>All classes that are annotated with {@link InterfaceAudience.Public} or
- * {@link InterfaceAudience.LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link InterfaceAudience.Private} are to be considered unstable unless
+ * <ul><li>All classes that are annotated with {@link Public} or
+ * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
+ * <li>Classes that are {@link Private} are to be considered unstable unless
 * a different InterfaceStability annotation states otherwise. </li>
 * <li>Incompatible changes must not be made to classes marked as stable. </li>
 * </ul>
    diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java index 20755a6e0ec..c176ad1464e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -97,6 +97,7 @@ public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { * Returns the number of decommissioning nodes. * * @param jsonNode + * @return */ public static int getNumDecomNodes(JsonNode jsonNode) { int numDecomNodes; @@ -117,6 +118,7 @@ public static int getNumDecomNodes(JsonNode jsonNode) { * @param numDecomNodes * @param countsMap * @param errMsg + * @return * @throws IOException */ @Nullable diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java index eb6142ea67d..3ed9f4e58e1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java @@ -20,13 +20,11 @@ /** * This class contains constants for Recon related configuration keys used in - * SCM and Datanode. + * SCM & Datanode. */ public final class ReconConfigKeys { /** - * This class contains constants for Recon related configuration keys used in - * SCM and Datanode. * Never constructed. */ private ReconConfigKeys() { @@ -73,7 +71,7 @@ private ReconConfigKeys() { * Recon administrator users delimited by a comma. * This is the list of users who can access admin only information from recon. * Users defined in - * {@link org.apache.hadoop.ozone.OzoneConfigKeys#OZONE_ADMINISTRATORS} + * {@link org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS} * will always be able to access all recon information regardless of this * setting. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index dd78faf6827..3ef9317ced0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -206,7 +206,8 @@ public int getScmDefaultLayoutVersionOnInit() { * required for SCMSecurityProtocol where the KerberosInfo references * the old configuration with * the annotation shown below:- - * {@code @KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} + * @KerberosInfo(serverPrincipal = ScmConfigKeys + * .HDDS_SCM_KERBEROS_PRINCIPAL_KEY) */ public static class ConfigStrings { public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index db789783c7c..36d4dbd45a2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -144,10 +144,6 @@ public final class ScmConfigKeys { "ozone.chunk.read.mapped.buffer.threshold"; public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT = "32KB"; - public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY = - "ozone.chunk.read.mapped.buffer.max.count"; - // this max_count could not be greater than Linux platform max_map_count which by default is 65530. - public static final int OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT = 0; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 77079a7bddc..648e2586ae2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -396,6 +396,7 @@ StartContainerBalancerResponseProto startContainerBalancer( * Force generates new secret keys (rotate). * * @param force boolean flag that forcefully rotates the key on demand + * @return * @throws IOException */ boolean rotateSecretKeys(boolean force) throws IOException; @@ -413,7 +414,7 @@ StartContainerBalancerResponseProto startContainerBalancer( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if {@literal < 0}. return all. + * @param count Maximum num of returned transactions, if < 0. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index 45bc77d1d8f..df8e9d45e13 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -146,6 +146,7 @@ public long getReportTimeStamp() { /** * Return a map of all stats and their value as a long. 
+ * @return */ public Map getStats() { Map result = new HashMap<>(); @@ -158,6 +159,7 @@ public Map getStats() { /** * Return a map of all samples, with the stat as the key and the samples * for the stat as a List of Long. + * @return */ public Map> getSamples() { Map> result = new HashMap<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java index 66fe7d18783..a5e443a598d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java @@ -67,6 +67,7 @@ public class SCMNodeInfo { /** * Build SCM Node information from configuration. * @param conf + * @return */ public static List buildNodeInfo(ConfigurationSource conf) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 779f2456be6..332dddac25c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -307,13 +307,10 @@ public void remove(Node node) { * @param loc string location of a node. If loc starts with "/", it's a * absolute path, otherwise a relative path. Following examples * are all accepted, - *
    -   *            {@code
        *            1.  /dc1/rm1/rack1          -> an inner node
        *            2.  /dc1/rm1/rack1/node1    -> a leaf node
        *            3.  rack1/node1             -> a relative path to this node
    -   *            }
    -   *            
    + * * @return null if the node is not found */ @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 31e83f82d69..1f3d0f02e6d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -789,9 +789,6 @@ public List sortByDistanceCost(Node reader, List shuffledNodes = new ArrayList<>(nodes.subList(0, activeLen)); shuffleOperation.accept(shuffledNodes); - if (LOG.isDebugEnabled()) { - LOG.debug("Sorted datanodes {}, result: {}", nodes, shuffledNodes); - } return shuffledNodes; } // Sort weights for the nodes array @@ -818,9 +815,6 @@ public List sortByDistanceCost(Node reader, Preconditions.checkState(ret.size() == activeLen, "Wrong number of nodes sorted!"); - if (LOG.isDebugEnabled()) { - LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, reader, ret); - } return ret; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 54a32e9c340..1486f05f55c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -243,6 +243,7 @@ public int getReplicaIndex(DatanodeDetails dn) { /** * Get the replicaIndex Map. + * @return */ public Map getReplicaIndexes() { return this.getNodes().stream().collect(Collectors.toMap(Function.identity(), this::getReplicaIndex)); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 7cdf8b8eed9..e91b50b4145 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -337,7 +337,7 @@ Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if {@literal < 0}. return all. + * @param count Maximum num of returned transactions, if < 0. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. 
* @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index e74bb1f621a..79db6985e76 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.security.x509.certificate.client; -import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.exception.OzoneSecurityException; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -129,6 +128,23 @@ X509Certificate getCertificate(String certSerialId) */ Set getAllCaCerts(); + /** + * Return the pem encoded CA certificate list. + *

    + * If initialized return list of pem encoded CA certificates, else return + * null. + * + * @return list of pem encoded CA certificates. + */ + List getCAList(); + + /** + * Update and returns the pem encoded CA certificate list. + * @return list of pem encoded CA certificates. + * @throws IOException + */ + List updateCAList() throws IOException; + /** * Verifies a digital Signature, given the signature and the certificate of * the signer. @@ -160,32 +176,10 @@ default void assertValidKeysAndCertificate() throws OzoneSecurityException { } } - /** - * Gets a KeyManager containing this CertificateClient's key material and trustchain. - * During certificate rotation this KeyManager is automatically updated with the new keys/certificates. - * - * @return A KeyManager containing keys and the trustchain for this CertificateClient. - * @throws CertificateException - */ ReloadingX509KeyManager getKeyManager() throws CertificateException; - /** - * Gets a TrustManager containing the trusted certificates of this CertificateClient. - * During certificate rotation this TrustManager is automatically updated with the new certificates. - * - * @return A TrustManager containing trusted certificates for this CertificateClient. - * @throws CertificateException - */ ReloadingX509TrustManager getTrustManager() throws CertificateException; - /** - * Creates a ClientTrustManager instance using the trusted certificates of this certificate client. - * - * @return The new ClientTrustManager instance. - * @throws IOException - */ - ClientTrustManager createClientTrustManager() throws IOException; - /** * Register a receiver that will be called after the certificate renewed. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 66685b4bbbd..31aaca568e4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -131,7 +131,7 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, * Fall back to OZONE_METADATA_DIRS if not defined. * * @param conf - * @return File + * @return */ public static File getScmDbDir(ConfigurationSource conf) { File metadataDir = getDirectoryFromConfig(conf, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java index 9579d4e73bf..477a291f928 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java @@ -31,6 +31,7 @@ /** * Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to * observe resource object life-cycle and assert proper resource closure before they are GCed. + * *

    * Example usage: * @@ -42,18 +43,16 @@ * // report leaks, don't refer to the original object (MyResource) here. * System.out.println("MyResource is not closed before being discarded."); * }); - * } - * } - * - *

    - *   {@code @Override
    + *
    + *   @Override
      *   public void close() {
      *     // proper resources cleanup...
      *     // inform tracker that this object is closed properly.
      *     leakTracker.close();
      *   }
    - *  }
    - * 
    + * } + * + * } */ public class LeakDetector { private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index 8d6f3c32e53..dff0b015ed5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -23,9 +23,9 @@ import java.io.IOException; /** - * A {@link org.apache.hadoop.hdds.utils.db.Codec} to serialize/deserialize objects by delegation. + * A {@link Codec} to serialize/deserialize objects by delegation. * - * @param The object type of this {@link org.apache.hadoop.hdds.utils.db.Codec}. + * @param The object type of this {@link Codec}. * @param The object type of the {@link #delegate}. */ public class DelegatedCodec implements Codec { @@ -53,8 +53,8 @@ public enum CopyType { * Construct a {@link Codec} using the given delegate. * * @param delegate the delegate {@link Codec} - * @param forward a function to convert {@code DELEGATE} to {@code T}. - * @param backward a function to convert {@code T} back to {@code DELEGATE}. + * @param forward a function to convert {@link DELEGATE} to {@link T}. + * @param backward a function to convert {@link T} back to {@link DELEGATE}. * @param copyType How to {@link #copyObject(Object)}? */ public DelegatedCodec(Codec delegate, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index df0fdc59a4a..c61502ff4a8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -120,14 +120,6 @@ public final class OzoneConfigKeys { public static final String OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT = "4MB"; - /** - * Flag to allow server-side HBase-related features and enhancements to be enabled. - */ - public static final String OZONE_HBASE_ENHANCEMENTS_ALLOWED - = "ozone.hbase.enhancements.allowed"; - public static final boolean OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT - = false; - /** * Flag to enable hsync/hflush. 
*/ @@ -543,6 +535,10 @@ public final class OzoneConfigKeys { public static final int OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT = 512; + public static final String OZONE_CLIENT_LIST_TRASH_KEYS_MAX = + "ozone.client.list.trash.keys.max"; + public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000; + public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir"; public static final String OZONE_HTTP_POLICY_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 101507b502e..b34a5d8387b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -40,6 +40,7 @@ public final class OzoneConsts { public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId"; public static final String PRIMARY_SCM_NODE_ID = "primaryScmNodeId"; + public static final String OZONE_SIMPLE_ROOT_USER = "root"; public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; public static final String STORAGE_ID = "storageID"; @@ -75,6 +76,12 @@ public final class OzoneConsts { "EEE, dd MMM yyyy HH:mm:ss zzz"; public static final String OZONE_TIME_ZONE = "GMT"; + public static final String OZONE_COMPONENT = "component"; + public static final String OZONE_FUNCTION = "function"; + public static final String OZONE_RESOURCE = "resource"; + public static final String OZONE_USER = "user"; + public static final String OZONE_REQUEST = "request"; + // OM Http server endpoints public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = "/serviceList"; @@ -94,9 +101,14 @@ public final class OzoneConsts { public static final String CONTAINER_EXTENSION = ".container"; + public static final String CONTAINER_META = ".meta"; + + // Refer to {@link ContainerReader} for container storage layout on disk. + public static final String CONTAINER_PREFIX = "containers"; public static final String CONTAINER_META_PATH = "metadata"; public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; + public static final String CONTAINER_ROOT_PREFIX = "repository"; public static final String FILE_HASH = "SHA-256"; public static final String MD5_HASH = "MD5"; @@ -116,6 +128,7 @@ public final class OzoneConsts { * level DB names used by SCM and data nodes. */ public static final String CONTAINER_DB_SUFFIX = "container.db"; + public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX; public static final String OM_DB_NAME = "om.db"; public static final String SCM_DB_NAME = "scm.db"; @@ -174,8 +187,10 @@ public final class OzoneConsts { public static final String OM_USER_PREFIX = "$"; public static final String OM_S3_PREFIX = "S3:"; public static final String OM_S3_CALLER_CONTEXT_PREFIX = "S3Auth:S3G|"; + public static final String OM_S3_VOLUME_PREFIX = "s3"; public static final String OM_S3_SECRET = "S3Secret:"; public static final String OM_PREFIX = "Prefix:"; + public static final String OM_TENANT = "Tenant:"; /** * Max chunk size limit. @@ -183,6 +198,11 @@ public final class OzoneConsts { public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; + /** + * Max OM Quota size of Long.MAX_VALUE. + */ + public static final long MAX_QUOTA_IN_BYTES = Long.MAX_VALUE; + /** * Quota RESET default is -1, which means quota is not set. 
*/ @@ -194,20 +214,36 @@ public final class OzoneConsts { */ public enum Units { TB, GB, MB, KB, B } + /** + * Max number of keys returned per list buckets operation. + */ + public static final int MAX_LISTBUCKETS_SIZE = 1024; + + /** + * Max number of keys returned per list keys operation. + */ + public static final int MAX_LISTKEYS_SIZE = 1024; + + /** + * Max number of volumes returned per list volumes operation. + */ + public static final int MAX_LISTVOLUMES_SIZE = 1024; + + public static final int INVALID_PORT = -1; + /** * Object ID to identify reclaimable uncommitted blocks. */ public static final long OBJECT_ID_RECLAIM_BLOCKS = 0L; + /** * Default SCM Datanode ID file name. */ public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - /** - * The ServiceListJSONServlet context attribute where OzoneManager - * instance gets stored. - */ + // The ServiceListJSONServlet context attribute where OzoneManager + // instance gets stored. public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm"; @@ -272,8 +308,12 @@ private OzoneConsts() { public static final String KEY_PREFIX = "keyPrefix"; public static final String ACL = "acl"; public static final String ACLS = "acls"; + public static final String USER_ACL = "userAcl"; + public static final String ADD_ACLS = "addAcls"; + public static final String REMOVE_ACLS = "removeAcls"; public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; public static final String HAS_SNAPSHOT = "hasSnapshot"; + public static final String TO_KEY_NAME = "toKeyName"; public static final String STORAGE_TYPE = "storageType"; public static final String RESOURCE_TYPE = "resourceType"; public static final String IS_VERSION_ENABLED = "isVersionEnabled"; @@ -283,6 +323,7 @@ private OzoneConsts() { public static final String REPLICATION_TYPE = "replicationType"; public static final String REPLICATION_FACTOR = "replicationFactor"; public static final String REPLICATION_CONFIG = "replicationConfig"; + public static final String KEY_LOCATION_INFO = "keyLocationInfo"; public static final String MULTIPART_LIST = "multipartList"; public static final String UPLOAD_ID = "uploadID"; public static final String PART_NUMBER_MARKER = "partNumberMarker"; @@ -337,6 +378,10 @@ private OzoneConsts() { public static final String JAVA_TMP_DIR = "java.io.tmpdir"; public static final String LOCALHOST = "localhost"; + + public static final int S3_BUCKET_MIN_LENGTH = 3; + public static final int S3_BUCKET_MAX_LENGTH = 64; + public static final int S3_SECRET_KEY_MIN_LENGTH = 8; public static final int S3_REQUEST_HEADER_METADATA_SIZE_LIMIT_KB = 2; @@ -353,6 +398,7 @@ private OzoneConsts() { public static final String GDPR_ALGORITHM_NAME = "AES"; public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; public static final Charset GDPR_CHARSET = StandardCharsets.UTF_8; + public static final String GDPR_LENGTH = "length"; public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; @@ -363,7 +409,7 @@ private OzoneConsts() { * contains illegal characters when creating/renaming key. * * Avoid the following characters in a key name: - * {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]"}, Quotation + * "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation * marks and Non-printable ASCII characters (128–255 decimal characters). 
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html */ @@ -380,6 +426,13 @@ private OzoneConsts() { public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; + // SCM HA + public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; + + // SCM Ratis snapshot file to store the last applied index + public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex"; + + public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm"; // An on-disk transient marker file used when replacing DB with checkpoint public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 982b559c7a5..eec2ceeb5e8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -44,8 +44,6 @@ public enum OzoneManagerVersion implements ComponentVersion { ATOMIC_REWRITE_KEY(6, "OzoneManager version that supports rewriting key as atomic operation"), HBASE_SUPPORT(7, "OzoneManager version that supports HBase integration"), - LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight" - + " listStatus API."), FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index a5235978327..1d596bf7007 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -44,14 +44,12 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { static { Field f = null; - if (JavaUtils.isJavaVersionAtMost(8)) { - try { - f = ByteBuffer.class - .getDeclaredField("isReadOnly"); - f.setAccessible(true); - } catch (NoSuchFieldException e) { - LOG.error("No isReadOnly field in ByteBuffer", e); - } + try { + f = ByteBuffer.class + .getDeclaredField("isReadOnly"); + f.setAccessible(true); + } catch (NoSuchFieldException e) { + LOG.error("No isReadOnly field in ByteBuffer", e); } IS_READY_ONLY_FIELD = f; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index a24d39e5dac..058934c2f27 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -39,12 +39,13 @@ static ChunkBuffer allocate(int capacity) { return allocate(capacity, 0); } - /** Similar to {@link ByteBuffer#allocate(int)} + /** + * Similar to {@link ByteBuffer#allocate(int)} * except that it can specify the increment. * * @param increment * the increment size so that this buffer is allocated incrementally. - * When increment {@literal <= 0}, entire buffer is allocated in the beginning. + * When increment <= 0, entire buffer is allocated in the beginning. 
*/ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { @@ -59,8 +60,7 @@ static ChunkBuffer wrap(ByteBuffer buffer) { return new ChunkBufferImplWithByteBuffer(buffer); } - /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer}, - * with a function called when buffers are released.*/ + /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer}. */ static ChunkBuffer wrap(List buffers) { Objects.requireNonNull(buffers, "buffers == null"); if (buffers.size() == 1) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index ea5c5453f3f..4bd170df8e8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto3Codec; -import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; import java.util.Collections; @@ -281,14 +280,4 @@ public void appendTo(StringBuilder sb) { sb.append(", size=").append(size); sb.append("]"); } - - public long getBlockGroupLength() { - String lenStr = getMetadata() - .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); - // If we don't have the length, then it indicates a problem with the stripe. - // All replica should carry the length, so if it is not there, we return 0, - // which will cause us to set the length of the block to zero and not - // attempt to reconstruct it. - return (lenStr == null) ? 0 : Long.parseLong(lenStr); - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index 832ab40d30f..fdf40af9e09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -27,7 +27,7 @@ /** * Helper class to convert between protobuf lists and Java lists of - * {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo} objects. + * {@link ContainerProtos.ChunkInfo} objects. *

    * This class is immutable. */ @@ -49,7 +49,7 @@ public ChunkInfoList(List chunks) { } /** - * @return A new {@link #ChunkInfoList} created from protobuf data. + * @return A new {@link ChunkInfoList} created from protobuf data. */ public static ChunkInfoList getFromProtoBuf( ContainerProtos.ChunkInfoList chunksProto) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java index b94dd024b2d..83e63a2a322 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java @@ -37,16 +37,18 @@ /** * Generic factory which stores different instances of Type 'T' sharded by - * a key and version. A single key can be associated with different versions + * a key & version. A single key can be associated with different versions * of 'T'. + * * Why does this class exist? * A typical use case during upgrade is to have multiple versions of a class * / method / object and chose them based on current layout * version at runtime. Before finalizing, an older version is typically * needed, and after finalize, a newer version is needed. This class serves * this purpose in a generic way. + * * For example, we can create a Factory to create multiple versions of - * OMRequests sharded by Request Type and Layout Version Supported. + * OMRequests sharded by Request Type & Layout Version Supported. */ public class LayoutVersionInstanceFactory { @@ -69,7 +71,7 @@ public class LayoutVersionInstanceFactory { /** * Register an instance with a given factory key (key + version). * For safety reasons we dont allow (1) re-registering, (2) registering an - * instance with version > SLV. + * instance with version > SLV. * * @param lvm LayoutVersionManager * @param key VersionFactoryKey key to associate with instance. @@ -136,15 +138,13 @@ private boolean isValid(LayoutVersionManager lvm, int version) { } /** - *

        * From the list of versioned instances for a given "key", this
        * returns the "floor" value corresponding to the given version.
    -   * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
    -   * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
    +   * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
    +   * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
        * return CreateKeyV1.
        * Since this is a priority queue based implementation, we use a O(1) peek()
        * lookup to get the current valid version.
    -   * 
    * @param lvm LayoutVersionManager * @param key Key and Version. * @return instance. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java index a765c2c9455..3137d756e6b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java @@ -74,6 +74,7 @@ public interface LayoutVersionManager { /** * Generic API for returning a registered handler for a given type. * @param type String type + * @return */ default Object getHandler(String type) { return null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java index 19c0498aa7a..44ae94870e3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java @@ -50,14 +50,14 @@ public interface UpgradeFinalizer { * Represents the current state in which the service is with regards to * finalization after an upgrade. * The state transitions are the following: - * {@code ALREADY_FINALIZED} - no entry no exit from this status without restart. + * ALREADY_FINALIZED - no entry no exit from this status without restart. * After an upgrade: - * {@code FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION - * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE} from finalization done + * FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION + * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE from finalization done * there is no more move possible, after a restart the service can end up in: - * {@code FINALIZATION_REQUIRED}, if the finalization failed and have not reached - * {@code FINALIZATION_DONE}, - * - or it can be {@code ALREADY_FINALIZED} if the finalization was successfully done. + * - FINALIZATION_REQUIRED, if the finalization failed and have not reached + * FINALIZATION_DONE, + * - or it can be ALREADY_FINALIZED if the finalization was successfully done. */ enum Status { ALREADY_FINALIZED, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java index 6465cc85501..bda45f5a745 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java @@ -20,7 +20,7 @@ /** * "Key" element to the Version specific instance factory. Currently it has 2 - * dimensions -> a 'key' string and a version. This is to support a factory + * dimensions -> a 'key' string and a version. This is to support a factory * which returns an instance for a given "key" and "version". */ public class VersionFactoryKey { diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 9b0ff0e9625..20c1bed89be 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -860,15 +860,6 @@ The default read threshold to use memory mapped buffers. 
    </description>
  </property>
-  <property>
-    <name>ozone.chunk.read.mapped.buffer.max.count</name>
-    <value>0</value>
-    <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
-    <description>
-      The default max count of memory mapped buffers allowed for a DN.
-      Default 0 means no mapped buffers allowed for data read.
-    </description>
-  </property>
   <property>
     <name>ozone.scm.container.layout</name>
     <value>FILE_PER_BLOCK</value>
@@ -3415,6 +3406,14 @@
       unhealthy will each have their own limit.
     </description>
   </property>

+  <property>
+    <name>ozone.client.list.trash.keys.max</name>
+    <value>1000</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>
+      The maximum number of keys to return for a list trash request.
+    </description>
+  </property>
   <property>
     <name>ozone.http.basedir</name>
@@ -3743,15 +3742,6 @@
     </description>
   </property>

-  <property>
-    <name>ozone.snapshot.deep.cleaning.enabled</name>
-    <value>false</value>
-    <tag>OZONE, PERFORMANCE, OM</tag>
-    <description>
-      Flag to enable/disable snapshot deep cleaning.
-    </description>
-  </property>
-
   <property>
     <name>ozone.scm.event.ContainerReport.thread.pool.size</name>
     <value>10</value>
@@ -4234,27 +4224,12 @@
     </description>
   </property>

-  <property>
-    <name>ozone.hbase.enhancements.allowed</name>
-    <value>false</value>
-    <tag>OZONE, OM</tag>
-    <description>
-      When set to false, server-side HBase enhancement-related Ozone (experimental) features
-      are disabled (not allowed to be enabled) regardless of whether those configs are set.
-
-      Here is the list of configs and values overridden when this config is set to false:
-      1. ozone.fs.hsync.enabled = false
-
-      A warning message will be printed if any of the above configs are overridden by this.
-    </description>
-  </property>

   <property>
     <name>ozone.fs.hsync.enabled</name>
     <value>false</value>
-    <tag>OZONE, CLIENT, OM</tag>
+    <tag>OZONE, CLIENT</tag>
     <description>
-      Enable hsync/hflush on the Ozone Manager and/or client side. Disabled by default.
-      Can be enabled only when ozone.hbase.enhancements.allowed = true
+      Enable hsync/hflush. By default they are disabled.
     </description>
   </property>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
index 0d6c0c90878..b1a20c9aecb 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
@@ -108,7 +108,7 @@ default String[] getTrimmedStrings(String name) {
   /**
    * Gets the configuration entries where the key contains the prefix. This
    * method will strip the prefix from the key in the return Map.
-   * Example: {@code somePrefix.key->value} will be {@code key->value} in the returned map.
+   * Example: somePrefix.key->value will be key->value in the returned map.
   * @param keyPrefix Prefix to search.
   * @return Map containing keys that match and their values.
*/ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java index 3c08e58f9bf..969add4a15c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java @@ -33,7 +33,9 @@ import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.container.common.helpers.TokenHelper; @@ -71,7 +73,11 @@ private static XceiverClientManager createClientManager( throws IOException { ClientTrustManager trustManager = null; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - trustManager = certificateClient.createClientTrustManager(); + CACertificateProvider localCaCerts = + () -> HAUtils.buildCAX509List(certificateClient, conf); + CACertificateProvider remoteCacerts = + () -> HAUtils.buildCAX509List(null, conf); + trustManager = new ClientTrustManager(remoteCacerts, localCaCerts); } DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); return new XceiverClientManager(conf, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java index d14dc666b71..ac42efd45ad 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java @@ -46,16 +46,6 @@ public ReconcileContainerTask(ContainerController controller, this.dnClient = dnClient; } - @Override - protected String getMetricName() { - return "ContainerReconciliations"; - } - - @Override - protected String getMetricDescriptionSegment() { - return "Container Reconciliations"; - } - @Override public void runTask() { long start = Time.monotonicNow(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 15cc6245ddb..b5dfd07d576 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -189,6 +189,7 @@ public int containerCount() { * Send FCR which will not contain removed containers. 
* * @param context StateContext + * @return */ public void handleVolumeFailures(StateContext context) { AtomicBoolean failedVolume = new AtomicBoolean(false); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 28aa3d8588f..417fb443eef 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -649,7 +649,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) { @Override public void setClusterId(String clusterId) { - Preconditions.checkNotNull(clusterId, "clusterId cannot be null"); + Preconditions.checkNotNull(clusterId, "clusterId Cannot be null"); if (this.clusterId == null) { this.clusterId = clusterId; for (Map.Entry handlerMap : handlers.entrySet()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index 2e11cde3d9e..d6ca2d120e6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -35,7 +35,7 @@ /** * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). * The outer container map does not entail locking for a better performance. - * The inner {@code BlockDataMap} is synchronized. + * The inner {@link BlockDataMap} is synchronized. * * This class will maintain list of open keys per container when closeContainer * command comes, it should autocommit all open keys of a open container before diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java index bb47b5b9b6f..c584ba79037 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java @@ -90,7 +90,7 @@ public final List chooseContainerForBlockDeletion( /** * Abstract step for ordering the container data to be deleted. 
* Subclass need to implement the concrete ordering implementation - * in descending order (more prioritized -> less prioritized) + * in descending order (more prioritized -> less prioritized) * @param candidateContainers candidate containers to be ordered */ protected abstract void orderByDescendingPriority( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java index f075b6f67ca..d02bae0a35a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java @@ -75,6 +75,7 @@ void validateContainerCommand( /** * Returns the handler for the specified containerType. * @param containerType + * @return */ Handler getHandler(ContainerProtos.ContainerType containerType); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 9d157cc9912..55fcbcdb3cc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -218,6 +218,7 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, ReplicationSupervisorMetrics.create(supervisor); ecReconstructionMetrics = ECReconstructionMetrics.create(); + ecReconstructionCoordinator = new ECReconstructionCoordinator( conf, certClient, secretKeyClient, context, ecReconstructionMetrics, threadNamePrefix); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index bc703ac6a55..8533f7384d4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -18,6 +18,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -57,11 +58,11 @@ public class CloseContainerCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private final ThreadPoolExecutor executor; + private final ExecutorService executor; private long totalTime; /** - * Constructs a close container command handler. + * Constructs a ContainerReport handler. 
*/ public CloseContainerCommandHandler( int threadPoolSize, int queueSize, String threadNamePrefix) { @@ -219,14 +220,4 @@ public long getTotalRunTime() { public int getQueuedCount() { return queuedCount.get(); } - - @Override - public int getThreadPoolMaxPoolSize() { - return executor.getMaximumPoolSize(); - } - - @Override - public int getThreadPoolActivePoolSize() { - return executor.getActiveCount(); - } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java index c3f8da74c7a..9035b79c670 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java @@ -56,6 +56,11 @@ public final class CommandDispatcher { private CommandDispatcher(OzoneContainer container, SCMConnectionManager connectionManager, StateContext context, CommandHandler... handlers) { + Preconditions.checkNotNull(context); + Preconditions.checkNotNull(handlers); + Preconditions.checkArgument(handlers.length > 0); + Preconditions.checkNotNull(container); + Preconditions.checkNotNull(connectionManager); this.context = context; this.container = container; this.connectionManager = connectionManager; @@ -72,7 +77,6 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager commandHandlerMetrics = CommandHandlerMetrics.create(handlerMap); } - @VisibleForTesting public CommandHandler getCloseContainerHandler() { return handlerMap.get(Type.closeContainerCommand); } @@ -197,12 +201,11 @@ public Builder setContext(StateContext stateContext) { * @return Command Dispatcher. 
*/ public CommandDispatcher build() { - Preconditions.checkNotNull(this.connectionManager, - "Missing scm connection manager."); - Preconditions.checkNotNull(this.container, "Missing ozone container."); - Preconditions.checkNotNull(this.context, "Missing state context."); - Preconditions.checkArgument(this.handlerList.size() > 0, - "The number of command handlers must be greater than 0."); + Preconditions.checkNotNull(this.connectionManager, "Missing connection" + + " manager."); + Preconditions.checkNotNull(this.container, "Missing container."); + Preconditions.checkNotNull(this.context, "Missing context."); + Preconditions.checkArgument(this.handlerList.size() > 0); return new CommandDispatcher(this.container, this.connectionManager, this.context, handlerList.toArray( new CommandHandler[handlerList.size()])); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index bd7431c6145..747749066e3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -168,12 +168,12 @@ public int getQueuedCount() { @Override public int getThreadPoolMaxPoolSize() { - return executor.getMaximumPoolSize(); + return ((ThreadPoolExecutor)executor).getMaximumPoolSize(); } @Override public int getThreadPoolActivePoolSize() { - return executor.getActiveCount(); + return ((ThreadPoolExecutor)executor).getActiveCount(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java index b76e306e1c0..ead81c32e5b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.time.Clock; import java.util.OptionalLong; +import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -52,7 +53,7 @@ public class DeleteContainerCommandHandler implements CommandHandler { private final AtomicInteger invocationCount = new AtomicInteger(0); private final AtomicInteger timeoutCount = new AtomicInteger(0); private final AtomicLong totalTime = new AtomicLong(0); - private final ThreadPoolExecutor executor; + private final ExecutorService executor; private final Clock clock; private int maxQueueSize; @@ -69,7 +70,7 @@ public DeleteContainerCommandHandler( } protected DeleteContainerCommandHandler(Clock clock, - ThreadPoolExecutor executor, int queueSize) { + ExecutorService executor, int queueSize) { this.executor = executor; this.clock = clock; maxQueueSize = queueSize; @@ -130,7 +131,7 @@ private void handleInternal(SCMCommand command, StateContext context, 
@Override public int getQueuedCount() { - return executor.getQueue().size(); + return ((ThreadPoolExecutor)executor).getQueue().size(); } @Override @@ -159,16 +160,6 @@ public long getTotalRunTime() { return totalTime.get(); } - @Override - public int getThreadPoolMaxPoolSize() { - return executor.getMaximumPoolSize(); - } - - @Override - public int getThreadPoolActivePoolSize() { - return executor.getActiveCount(); - } - @Override public void stop() { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index caa6b9df121..b6ab4748fe3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -495,7 +495,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { /** * Sets the LayoutVersionManager. * - * @param lvm config + * @param versionMgr - config * @return Builder */ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 969756b40f8..71f95cc4d32 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -244,7 +244,7 @@ public Builder setConfig(ConfigurationSource config) { /** * Sets the LayoutVersionManager. * - * @param lvm config + * @param versionMgr - config * @return Builder. 
*/ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index be566f84fc9..b3398de07ad 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -83,7 +83,6 @@ import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.protocol.TermIndex; @@ -234,7 +233,7 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower stateMachineDataCache = new ResourceCache<>( - (index, data) -> data.size(), + (index, data) -> ((ByteString)data).size(), pendingRequestsBytesLimit, (p) -> { if (p.wasEvicted()) { @@ -705,10 +704,9 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { return chunkExecutors.get(i); } - /** - * {@link #writeStateMachineData} - * calls are not synchronized with each other - * and also with {@code applyTransaction(TransactionContext)}. + /* + * writeStateMachineData calls are not synchronized with each other + * and also with applyTransaction. */ @Override public CompletableFuture write(LogEntryProto entry, TransactionContext trx) { @@ -826,7 +824,7 @@ public CompletableFuture flush(long index) { } /** - * This method is used by the Leader to read state machine data for sending appendEntries to followers. + * This method is used by the Leader to read state machine date for sending appendEntries to followers. * It will first get the data from {@link #stateMachineDataCache}. * If the data is not in the cache, it will read from the file by dispatching a command * @@ -1163,8 +1161,8 @@ public void evictStateMachineCache() { } @Override - public void notifyFollowerSlowness(RoleInfoProto roleInfoProto, RaftPeer follower) { - ratisServer.handleFollowerSlowness(gid, roleInfoProto, follower); + public void notifyFollowerSlowness(RoleInfoProto roleInfoProto) { + ratisServer.handleNodeSlowness(gid, roleInfoProto); } @Override @@ -1199,7 +1197,7 @@ public void notifyGroupRemove() { try { containerController.markContainerForClose(cid); containerController.quasiCloseContainer(cid, - "Ratis group removed. 
Group id: " + gid); + "Ratis group removed"); } catch (IOException e) { LOG.debug("Failed to quasi-close container {}", cid); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index a4c14343985..7899cdcc0e6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -104,7 +104,6 @@ import org.apache.ratis.server.RaftServerRpc; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.storage.RaftStorage; -import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.SizeInBytes; import org.apache.ratis.util.TimeDuration; import org.apache.ratis.util.TraditionalBinaryPrefix; @@ -162,18 +161,19 @@ private static long nextCallId() { private int clientPort; private int dataStreamPort; private final RaftServer server; - private final String name; private final List chunkExecutors; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final ClientId clientId = ClientId.randomId(); private final StateContext context; + private final long nodeFailureTimeoutMs; private boolean isStarted = false; private final DatanodeDetails datanodeDetails; private final ConfigurationSource conf; // TODO: Remove the gids set when Ratis supports an api to query active // pipelines private final ConcurrentMap activePipelines = new ConcurrentHashMap<>(); + private final RaftPeerId raftPeerId; // Timeout used while calling submitRequest directly. private final long requestTimeout; private final boolean shouldDeleteRatisLogDirectory; @@ -197,14 +197,14 @@ private XceiverServerRatis(HddsDatanodeService hddsDatanodeService, DatanodeDeta this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; + this.raftPeerId = RatisHelper.toRaftPeerId(dd); String threadNamePrefix = datanodeDetails.threadNamePrefix(); chunkExecutors = createChunkExecutors(conf, threadNamePrefix); + nodeFailureTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); shouldDeleteRatisLogDirectory = ratisServerConfig.shouldDeleteRatisLogDirectory(); RaftProperties serverProperties = newRaftProperties(); - final RaftPeerId raftPeerId = RatisHelper.toRaftPeerId(dd); - this.name = getClass().getSimpleName() + "(" + raftPeerId + ")"; this.server = RaftServer.newBuilder().setServerId(raftPeerId) .setProperties(serverProperties) @@ -474,7 +474,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { // NOTE : the default value for the retry count in ratis is -1, // which means retry indefinitely. 
- final int syncTimeoutRetryDefault = (int) ratisServerConfig.getFollowerSlownessTimeout() / + int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, @@ -558,7 +558,7 @@ private static Parameters createTlsParameters(SecurityConfig conf, @Override public void start() throws IOException { if (!isStarted) { - LOG.info("Starting {}", name); + LOG.info("Starting {} {}", getClass().getSimpleName(), server.getId()); for (ThreadPoolExecutor executor : chunkExecutors) { executor.prestartAllCoreThreads(); } @@ -581,11 +581,11 @@ public void start() throws IOException { } } - private int getRealPort(InetSocketAddress address, Port.Name portName) { + private int getRealPort(InetSocketAddress address, Port.Name name) { int realPort = address.getPort(); - final Port port = DatanodeDetails.newPort(portName, realPort); - datanodeDetails.setPort(port); - LOG.info("{} is started using port {}", name, port); + datanodeDetails.setPort(DatanodeDetails.newPort(name, realPort)); + LOG.info("{} {} is started using port {} for {}", + getClass().getSimpleName(), server.getId(), realPort, name); return realPort; } @@ -593,7 +593,7 @@ private int getRealPort(InetSocketAddress address, Port.Name portName) { public void stop() { if (isStarted) { try { - LOG.info("Closing {}", name); + LOG.info("Stopping {} {}", getClass().getSimpleName(), server.getId()); // shutdown server before the executors as while shutting down, // some of the tasks would be executed using the executors. server.close(); @@ -602,7 +602,7 @@ public void stop() { } isStarted = false; } catch (IOException e) { - LOG.error("Failed to close {}.", name, e); + LOG.error("XceiverServerRatis Could not be stopped gracefully.", e); } } } @@ -706,40 +706,45 @@ private GroupInfoRequest createGroupInfoRequest( nextCallId()); } - private void handlePipelineFailure(RaftGroupId groupId, RoleInfoProto roleInfoProto, String reason) { - final RaftPeerId raftPeerId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); - Preconditions.assertEquals(getServer().getId(), raftPeerId, "raftPeerId"); - final StringBuilder b = new StringBuilder() - .append(name).append(" with datanodeId ").append(RatisHelper.toDatanodeId(raftPeerId)) - .append("handlePipelineFailure ").append(" for ").append(reason) - .append(": ").append(roleInfoProto.getRole()) - .append(" elapsed time=").append(roleInfoProto.getRoleElapsedTimeMs()).append("ms"); - + private void handlePipelineFailure(RaftGroupId groupId, + RoleInfoProto roleInfoProto) { + String msg; + UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf()); + RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); switch (roleInfoProto.getRole()) { case CANDIDATE: - final long lastLeaderElapsedTime = roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs(); - b.append(", lastLeaderElapsedTime=").append(lastLeaderElapsedTime).append("ms"); + msg = datanode + " is in candidate state for " + + roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; break; case FOLLOWER: - b.append(", outstandingOp=").append(roleInfoProto.getFollowerInfo().getOutstandingOp()); + msg = datanode + " closes pipeline when installSnapshot from leader " + + "because leader snapshot doesn't contain any data to replay, " + + "all the log entries prior to the snapshot might have been purged." 
+ + "So follower should not try to install snapshot from leader but" + + "can close the pipeline here. It's in follower state for " + + roleInfoProto.getRoleElapsedTimeMs() + "ms"; break; case LEADER: - final long followerSlownessTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); - for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo().getFollowerInfoList()) { - final long lastRpcElapsedTimeMs = follower.getLastRpcElapsedTimeMs(); - final boolean slow = lastRpcElapsedTimeMs > followerSlownessTimeoutMs; - final RaftPeerId followerId = RaftPeerId.valueOf(follower.getId().getId()); - b.append("\n Follower ").append(followerId) - .append(" with datanodeId ").append(RatisHelper.toDatanodeId(followerId)) - .append(" is ").append(slow ? "slow" : " responding") - .append(" with lastRpcElapsedTime=").append(lastRpcElapsedTimeMs).append("ms"); + StringBuilder sb = new StringBuilder(); + sb.append(datanode).append(" has not seen follower/s"); + for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo() + .getFollowerInfoList()) { + if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) { + sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId())) + .append(" for ").append(follower.getLastRpcElapsedTimeMs()) + .append("ms"); + } } + msg = sb.toString(); break; default: - throw new IllegalStateException("Unexpected role " + roleInfoProto.getRole()); + LOG.error("unknown state: {}", roleInfoProto.getRole()); + throw new IllegalStateException("node" + id + " is in illegal role " + + roleInfoProto.getRole()); } - triggerPipelineClose(groupId, b.toString(), ClosePipelineInfo.Reason.PIPELINE_FAILED); + triggerPipelineClose(groupId, msg, + ClosePipelineInfo.Reason.PIPELINE_FAILED); } private void triggerPipelineClose(RaftGroupId groupId, String detail, @@ -864,12 +869,12 @@ public void removeGroup(HddsProtos.PipelineID pipelineId) processReply(reply); } - void handleFollowerSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto, RaftPeer follower) { - handlePipelineFailure(groupId, roleInfoProto, "slow follower " + follower.getId()); + void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { + handlePipelineFailure(groupId, roleInfoProto); } void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto, "no leader"); + handlePipelineFailure(groupId, roleInfoProto); } void handleApplyTransactionFailure(RaftGroupId groupId, @@ -896,9 +901,10 @@ void handleApplyTransactionFailure(RaftGroupId groupId, void handleInstallSnapshotFromLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - LOG.warn("handleInstallSnapshotFromLeader for firstTermIndexInLog={}, terminating pipeline: {}", + LOG.warn("Install snapshot notification received from Leader with " + + "termIndex: {}, terminating pipeline: {}", firstTermIndexInLog, groupId); - handlePipelineFailure(groupId, roleInfoProto, "install snapshot notification"); + handlePipelineFailure(groupId, roleInfoProto); } /** @@ -944,7 +950,7 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId, LOG.info("Leader change notification received for group: {} with new " + "leaderId: {}", groupMemberId.getGroupId(), raftPeerId1); // Save the reported leader to be sent with the report to SCM - final boolean leaderForGroup = server.getId().equals(raftPeerId1); + boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1); activePipelines.compute(groupMemberId.getGroupId(), (key, value) -> value == 
null ? new ActivePipelineContext(leaderForGroup, false) : new ActivePipelineContext(leaderForGroup, value.isPendingClose())); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index c58aab2e5ba..b22b9148bb1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -199,7 +199,7 @@ public void shutdown() { /** * Delete all files under - * volume/hdds/cluster-id/tmp/deleted-containers. + * /hdds//tmp/deleted-containers. * This is the directory where containers are moved when they are deleted * from the system, but before being removed from the filesystem. This * makes the deletion atomic. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 3d1be9791ec..af890269255 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -46,18 +46,16 @@ * - fsCapacity: reported total capacity from local fs. * - minVolumeFreeSpace (mvfs) : determines the free space for closing containers.This is like adding a few reserved bytes to reserved space. - Dn's will send close container action to SCM at this limit, and it is + Dn's will send close container action to SCM at this limit & it is configurable. * - *
    - * {@code
    + *
      * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
      * |<-     capacity                  ->|
      *              |     fsAvail      |-------other-----------|
      * |<-                   fsCapacity                      ->|
    - * }
    - *
    + *
      * What we could directly get from local fs:
      *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
      * We could get from config:
    @@ -80,13 +78,11 @@
      * then we should use DedicatedDiskSpaceUsage for
      * `hdds.datanode.du.factory.classname`,
      * Then it is much simpler, since we don't care about other usage:
    - * {@code
    + *
      *  |----used----|             (avail)/fsAvail              |
      *  |<-              capacity/fsCapacity                  ->|
    - * }
      *
      *  We have avail == fsAvail.
    - *  
    */ public final class VolumeInfo { @@ -157,14 +153,11 @@ public long getCapacity() { } /** - *
    -   * {@code
        * Calculate available space use method A.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        * |<-     capacity         ->|
    +   *
        * A) avail = capacity - used
    -   * }
    -   * 
    */ public long getAvailable() { return usage.getAvailable(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 733dc7964f1..7e138b05716 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -77,15 +77,11 @@ public long getUsedSpace() { } /** - *
    -   * {@code
        * Calculate available space use method B.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        *              |     fsAvail      |-------other-------|
    -   *                          ->|~~~~|<-
    +   *                          ->|~~~~|<-
        *                      remainingReserved
    -   * }
    -   * 
    * B) avail = fsAvail - Max(reserved - other, 0); */ public SpaceUsageSource getCurrentUsage() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 487e6d37b28..9dedd65565f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -26,10 +26,12 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; @@ -67,17 +69,21 @@ public ECContainerOperationClient(ConfigurationSource conf, } @Nonnull - private static XceiverClientManager createClientManager(ConfigurationSource conf, CertificateClient certificateClient) + private static XceiverClientManager createClientManager( + ConfigurationSource conf, CertificateClient certificateClient) throws IOException { ClientTrustManager trustManager = null; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - trustManager = certificateClient.createClientTrustManager(); + CACertificateProvider localCaCerts = + () -> HAUtils.buildCAX509List(certificateClient, conf); + CACertificateProvider remoteCacerts = + () -> HAUtils.buildCAX509List(null, conf); + trustManager = new ClientTrustManager(remoteCacerts, localCaCerts); } - XceiverClientManager.ScmClientConfig scmClientConfig = new XceiverClientManager.XceiverClientManagerConfigBuilder() - .setMaxCacheSize(256) - .setStaleThresholdMs(10 * 1000) - .build(); - return new XceiverClientManager(conf, scmClientConfig, trustManager); + return new XceiverClientManager(conf, + new XceiverClientManager.XceiverClientManagerConfigBuilder() + .setMaxCacheSize(256).setStaleThresholdMs(10 * 1000).build(), + trustManager); } public BlockData[] listBlock(long containerId, DatanodeDetails dn, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index f1e1d0d900b..7e64766b41c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory; import 
org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl; import org.apache.hadoop.ozone.client.io.ECBlockInputStreamProxy; @@ -370,7 +371,7 @@ private void logBlockGroupDetails(BlockLocationInfo blockLocationInfo, .append(" block length: ") .append(data.getSize()) .append(" block group length: ") - .append(data.getBlockGroupLength()) + .append(getBlockDataLength(data)) .append(" chunk list: \n"); int cnt = 0; for (ContainerProtos.ChunkInfo chunkInfo : data.getChunks()) { @@ -572,7 +573,7 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, continue; } - long putBlockLen = blockGroup[i].getBlockGroupLength(); + long putBlockLen = getBlockDataLength(blockGroup[i]); // Use safe length is the minimum of the lengths recorded across the // stripe blockGroupLen = Math.min(putBlockLen, blockGroupLen); @@ -580,6 +581,16 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, return blockGroupLen == Long.MAX_VALUE ? 0 : blockGroupLen; } + private long getBlockDataLength(BlockData blockData) { + String lenStr = blockData.getMetadata() + .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); + // If we don't have the length, then it indicates a problem with the stripe. + // All replica should carry the length, so if it is not there, we return 0, + // which will cause us to set the length of the block to zero and not + // attempt to reconstruct it. + return (lenStr == null) ? 0 : Long.parseLong(lenStr); + } + public ECReconstructionMetrics getECReconstructionMetrics() { return this.metrics; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java index a50a125f6d4..6d32f3a3f3e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java @@ -46,16 +46,6 @@ public ECReconstructionCoordinatorTask( debugString = reconstructionCommandInfo.toString(); } - @Override - public String getMetricName() { - return "ECReconstructions"; - } - - @Override - public String getMetricDescriptionSegment() { - return "EC reconstructions"; - } - @Override public void runTask() { // Implement the coordinator logic to handle a container group diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index b4ff62e52d2..cea6737c7c9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -935,6 +935,7 @@ private ContainerReplicaProto.State getHddsState() /** * Returns container DB file. 
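For reference, a self-contained sketch of the "safe length" rule restored in the ECReconstructionCoordinator hunk above: the usable EC block group length is the minimum PutBlock length recorded across the stripe's replicas, and a replica with no recorded length contributes 0 so nothing past the guaranteed data is reconstructed. Class and parameter names are illustrative.

import java.util.List;
import java.util.Map;

final class SafeBlockGroupLength {
  static long compute(List<Map<String, String>> replicaMetadata, String lengthKey) {
    long safeLen = Long.MAX_VALUE;
    for (Map<String, String> metadata : replicaMetadata) {
      String lenStr = metadata.get(lengthKey);
      // a missing length indicates a problem with the stripe; treat it as zero
      long putBlockLen = (lenStr == null) ? 0 : Long.parseLong(lenStr);
      safeLen = Math.min(safeLen, putBlockLen);
    }
    return safeLen == Long.MAX_VALUE ? 0 : safeLen;
  }
}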
+ * @return */ public File getContainerDBFile() { return KeyValueContainerLocationUtil.getContainerDBFile(containerData); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 708038bd13f..ccc24dad0f9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -431,6 +431,7 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { /** * Schema v3 use a prefix as startKey, * for other schemas just return null. + * @return */ public String startKeyEmpty() { if (hasSchema(SCHEMA_V3)) { @@ -442,6 +443,7 @@ public String startKeyEmpty() { /** * Schema v3 use containerID as key prefix, * for other schemas just return null. + * @return */ public String containerPrefix() { if (hasSchema(SCHEMA_V3)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index d587748e6f8..d1028727648 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -137,7 +137,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.ScanResult; import static org.apache.hadoop.ozone.ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST; -import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; @@ -596,13 +595,9 @@ ContainerCommandResponseProto handlePutBlock( boolean endOfBlock = false; if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) { - // There are two cases where client sends empty put block with eof. - // (1) An EC empty file. In this case, the block/chunk file does not exist, - // so no need to flush/close the file. - // (2) Ratis output stream in incremental chunk list mode may send empty put block - // to close the block, in which case we need to flush/close the file. - if (!request.getPutBlock().getBlockData().getChunksList().isEmpty() || - blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) { + // in EC, we will be doing empty put block. + // So, let's flush only when there are any chunks + if (!request.getPutBlock().getBlockData().getChunksList().isEmpty()) { chunkManager.finishWriteChunks(kvContainer, blockData); } endOfBlock = true; @@ -997,9 +992,6 @@ ContainerCommandResponseProto handleWriteChunk( // of order. 
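For reference, a simplified paraphrase of the flush rule restored in handlePutBlock above: an EC client may send an empty PutBlock (no chunks) with eof set for an empty block, and in that case there is no chunk file to flush or close. Types are reduced to booleans and a list purely for illustration.

import java.util.List;

final class PutBlockFlushRule {
  /** @return true if finishWriteChunks should be invoked before marking end of block. */
  static boolean shouldFlush(boolean hasEof, boolean eof, List<?> chunkList) {
    boolean endOfBlock = !hasEof || eof; // a missing eof flag is treated as end of block
    return endOfBlock && !chunkList.isEmpty();
  }
}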
blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); boolean eob = writeChunk.getBlock().getEof(); - if (eob) { - chunkManager.finishWriteChunks(kvContainer, blockData); - } blockManager.putBlock(kvContainer, blockData, eob); blockDataProto = blockData.getProtoBufMessage(); final long numBytes = blockDataProto.getSerializedSize(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 945efbcf6ea..7773b54f794 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -99,6 +99,7 @@ public static DatanodeStore getUncachedDatanodeStore( * opened by this thread, the other thread will get a RocksDB exception. * @param containerData The container data * @param conf Configuration + * @return * @throws IOException */ public static DatanodeStore getUncachedDatanodeStore( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index dc048ac16aa..0fac45571c7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -39,7 +39,6 @@ import java.util.EnumSet; import java.util.List; import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.function.ToLongFunction; @@ -51,7 +50,6 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -202,12 +200,11 @@ private static long writeDataToChannel(FileChannel channel, ChunkBuffer data, } } - @SuppressWarnings("checkstyle:parameternumber") public static ChunkBuffer readData(long len, int bufferCapacity, - File file, long off, HddsVolume volume, int readMappedBufferThreshold, boolean mmapEnabled, - MappedBufferManager mappedBufferManager) throws StorageContainerException { - if (mmapEnabled && len > readMappedBufferThreshold && bufferCapacity > readMappedBufferThreshold) { - return readData(file, bufferCapacity, off, len, volume, mappedBufferManager); + File file, long off, HddsVolume volume, int readMappedBufferThreshold) + throws StorageContainerException { + if (len > readMappedBufferThreshold) { + return readData(file, bufferCapacity, off, len, volume); } else if (len == 0) { return ChunkBuffer.wrap(Collections.emptyList()); } @@ -259,52 +256,25 @@ private static void readData(File file, long offset, long len, * @return a list of {@link MappedByteBuffer} containing the data. 
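For reference, a self-contained sketch of the memory-mapped read path that ChunkUtils falls back to for chunk reads above the mapped-buffer threshold, as restored in the hunk above. Class and method names are illustrative.

import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

final class MappedReadSketch {
  // Map a read-only region of a file; the mapping stays valid after the channel closes.
  static MappedByteBuffer mapRegion(String path, long offset, int length) throws Exception {
    try (RandomAccessFile raf = new RandomAccessFile(path, "r");
         FileChannel channel = raf.getChannel()) {
      return channel.map(FileChannel.MapMode.READ_ONLY, offset, length);
    }
  }
}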
*/ private static ChunkBuffer readData(File file, int chunkSize, - long offset, long length, HddsVolume volume, MappedBufferManager mappedBufferManager) + long offset, long length, HddsVolume volume) throws StorageContainerException { - final int bufferNum = Math.toIntExact((length - 1) / chunkSize) + 1; - if (!mappedBufferManager.getQuota(bufferNum)) { - // proceed with normal buffer - final ByteBuffer[] buffers = BufferUtils.assignByteBuffers(length, - chunkSize); - readData(file, offset, length, c -> c.position(offset).read(buffers), volume); - Arrays.stream(buffers).forEach(ByteBuffer::flip); - return ChunkBuffer.wrap(Arrays.asList(buffers)); - } else { - try { - // proceed with mapped buffer - final List buffers = new ArrayList<>(bufferNum); - readData(file, offset, length, channel -> { - long readLen = 0; - while (readLen < length) { - final int n = Math.toIntExact(Math.min(length - readLen, chunkSize)); - final long finalOffset = offset + readLen; - final AtomicReference exception = new AtomicReference<>(); - ByteBuffer mapped = mappedBufferManager.computeIfAbsent(file.getAbsolutePath(), finalOffset, n, - () -> { - try { - return channel.map(FileChannel.MapMode.READ_ONLY, finalOffset, n); - } catch (IOException e) { - LOG.error("Failed to map file {} with offset {} and length {}", file, finalOffset, n); - exception.set(e); - return null; - } - }); - if (mapped == null) { - throw exception.get(); - } - LOG.debug("mapped: offset={}, readLen={}, n={}, {}", finalOffset, readLen, n, mapped.getClass()); - readLen += mapped.remaining(); - buffers.add(mapped); - } - return readLen; - }, volume); - return ChunkBuffer.wrap(buffers); - } catch (Throwable e) { - mappedBufferManager.releaseQuota(bufferNum); - throw e; + final List buffers = new ArrayList<>( + Math.toIntExact((length - 1) / chunkSize) + 1); + readData(file, offset, length, channel -> { + long readLen = 0; + while (readLen < length) { + final int n = Math.toIntExact(Math.min(length - readLen, chunkSize)); + final ByteBuffer mapped = channel.map( + FileChannel.MapMode.READ_ONLY, offset + readLen, n); + LOG.debug("mapped: offset={}, readLen={}, n={}, {}", + offset, readLen, n, mapped.getClass()); + readLen += mapped.remaining(); + buffers.add(mapped); } - } + return readLen; + }, volume); + return ChunkBuffer.wrap(buffers); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index dd719a81fb3..b287d9ac133 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) { /** * Moves container directory to a new location - * under "volume/hdds/cluster-id/tmp/deleted-containers" + * under "/hdds//tmp/deleted-containers" * and updates metadata and chunks path. * Containers will be moved under it before getting deleted * to avoid, in case of failure, having artifact leftovers * on the default container path on the disk. * - * Delete operation for Schema < V3 + * Delete operation for Schema < V3 * 1. Container is marked DELETED * 2. Container is removed from memory container set * 3. 
Container DB handler from cache is removed and closed @@ -460,6 +460,7 @@ public static boolean isSameSchemaVersion(String schema, String other) { * 5. Container is deleted from tmp directory. * * @param keyValueContainerData + * @return true if renaming was successful */ public static void moveToDeletedContainerDir( KeyValueContainerData keyValueContainerData, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 6232b843567..7b3852011d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -64,7 +64,6 @@ public class BlockManagerImpl implements BlockManager { // Default Read Buffer capacity when Checksum is not present private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; - private final int readMappedBufferMaxCount; /** * Constructs a Block Manager. @@ -80,9 +79,6 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferThreshold = config.getBufferSize( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT); - this.readMappedBufferMaxCount = config.getInt( - ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY, - ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT); } @Override @@ -308,11 +304,6 @@ public int getReadMappedBufferThreshold() { return readMappedBufferThreshold; } - /** @return the max count of memory mapped buffers for read. */ - public int getReadMappedBufferMaxCount() { - return readMappedBufferMaxCount; - } - /** * Deletes an existing block. * As Deletion is handled by BlockDeletingService, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index aa5d52f3cee..288a2d3e331 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -46,6 +46,7 @@ private ChunkManagerFactory() { * @param conf Configuration * @param manager This parameter will be used only for read data of * FILE_PER_CHUNK layout file. Can be null for other cases. 
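For reference, a self-contained sketch of the "move, then delete" idea described in the moveToDeletedContainerDir javadoc above: renaming the container directory into the volume's tmp/deleted-containers area is a single rename on the same volume, so a crash mid-delete leaves no half-removed container at its original path. Paths and names below are illustrative.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class AtomicDeleteSketch {
  static void moveForDeletion(Path containerDir, Path deletedContainersDir) throws Exception {
    Files.createDirectories(deletedContainersDir);
    Path target = deletedContainersDir.resolve(containerDir.getFileName());
    // atomic rename on the same volume; the real filesystem delete can happen later
    Files.move(containerDir, target, StandardCopyOption.ATOMIC_MOVE);
  }
}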
+ * @return */ public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 4ca578d7717..a87b184ccec 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -75,8 +75,6 @@ public class FilePerBlockStrategy implements ChunkManager { private final OpenFiles files = new OpenFiles(); private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; - private final int readMappedBufferMaxCount; - private final MappedBufferManager mappedBufferManager; private final VolumeSet volumeSet; public FilePerBlockStrategy(boolean sync, BlockManager manager, @@ -86,15 +84,7 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, manager.getDefaultReadBufferCapacity(); this.readMappedBufferThreshold = manager == null ? 0 : manager.getReadMappedBufferThreshold(); - this.readMappedBufferMaxCount = manager == null ? 0 - : manager.getReadMappedBufferMaxCount(); - LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount); this.volumeSet = volSet; - if (this.readMappedBufferMaxCount > 0) { - mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount); - } else { - mappedBufferManager = null; - } } private static void checkLayoutVersion(Container container) { @@ -202,10 +192,10 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, final long len = info.getLen(); long offset = info.getOffset(); - int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, + int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity); return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, - readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); + readMappedBufferThreshold); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 6ac88cad7f5..a649f573bf0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -67,8 +67,6 @@ public class FilePerChunkStrategy implements ChunkManager { private final BlockManager blockManager; private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; - private final int readMappedBufferMaxCount; - private final MappedBufferManager mappedBufferManager; private final VolumeSet volumeSet; public FilePerChunkStrategy(boolean sync, BlockManager manager, @@ -79,15 +77,7 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager, manager.getDefaultReadBufferCapacity(); this.readMappedBufferThreshold = manager == null ? 0 : manager.getReadMappedBufferThreshold(); - this.readMappedBufferMaxCount = manager == null ? 
0 - : manager.getReadMappedBufferMaxCount(); - LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount); this.volumeSet = volSet; - if (this.readMappedBufferMaxCount > 0) { - mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount); - } else { - mappedBufferManager = null; - } } private static void checkLayoutVersion(Container container) { @@ -275,7 +265,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, long offset = info.getOffset() - chunkFileOffset; Preconditions.checkState(offset >= 0); return ChunkUtils.readData(len, bufferCapacity, file, offset, volume, - readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); + readMappedBufferThreshold); } } catch (StorageContainerException ex) { //UNABLE TO FIND chunk is not a problem as we will try with the diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java deleted file mode 100644 index be2751925c7..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.util.concurrent.Striped; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.ref.WeakReference; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Lock; -import java.util.function.Supplier; - -/** - * A Manager who manages the mapped buffers to under a predefined total count, also support reuse mapped buffers. 
- */ -public class MappedBufferManager { - - private static ConcurrentHashMap> mappedBuffers = - new ConcurrentHashMap>(); - private static final Logger LOG = LoggerFactory.getLogger(MappedBufferManager.class); - private final Semaphore semaphore; - private final int capacity; - private final AtomicBoolean cleanupInProgress = new AtomicBoolean(false); - private final Striped lock; - - public MappedBufferManager(int capacity) { - this.capacity = capacity; - this.semaphore = new Semaphore(capacity); - this.lock = Striped.lazyWeakLock(1024); - } - - public boolean getQuota(int permits) { - boolean ret = semaphore.tryAcquire(permits); - if (ret) { - if (LOG.isDebugEnabled()) { - LOG.debug("quota is decreased by {} to total {}", permits, semaphore.availablePermits()); - } - } else { - if (cleanupInProgress.compareAndSet(false, true)) { - CompletableFuture.runAsync(() -> { - int p = 0; - try { - for (String key : mappedBuffers.keySet()) { - ByteBuffer buf = mappedBuffers.get(key).get(); - if (buf == null) { - mappedBuffers.remove(key); - p++; - } - } - if (p > 0) { - releaseQuota(p); - } - } finally { - cleanupInProgress.set(false); - } - }); - } - } - return ret; - } - - public void releaseQuota(int permits) { - semaphore.release(permits); - if (LOG.isDebugEnabled()) { - LOG.debug("quota is increased by {} to total {}", permits, semaphore.availablePermits()); - } - } - - public int availableQuota() { - return semaphore.availablePermits(); - } - - public ByteBuffer computeIfAbsent(String file, long position, long size, - Supplier supplier) { - String key = file + "-" + position + "-" + size; - Lock fileLock = lock.get(key); - fileLock.lock(); - try { - WeakReference refer = mappedBuffers.get(key); - if (refer != null && refer.get() != null) { - // reuse the mapped buffer - if (LOG.isDebugEnabled()) { - LOG.debug("find buffer for key {}", key); - } - releaseQuota(1); - return refer.get(); - } - - ByteBuffer buffer = supplier.get(); - if (buffer != null) { - mappedBuffers.put(key, new WeakReference<>(buffer)); - if (LOG.isDebugEnabled()) { - LOG.debug("add buffer for key {}", key); - } - } - return buffer; - } finally { - fileLock.unlock(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java index 601e7b2712c..8df856d4b93 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java @@ -99,9 +99,7 @@ public void setLinked() { linked.set(true); } - /** - * @return true if {@link org.apache.ratis.statemachine.StateMachine.DataChannel} is already linked. - */ + /** @return true iff {@link StateMachine.DataChannel} is already linked. */ public boolean cleanUp() { if (linked.get()) { // already linked, nothing to do. 
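For reference, a self-contained sketch of the quota-and-reuse idea behind the MappedBufferManager class deleted above: a semaphore caps how many mapped buffers may be live at once, and weak references let the GC reclaim mappings that are no longer in use. Names are illustrative and error handling is omitted; the removed class used tryAcquire and per-key striped locks rather than the blocking acquire shown here.

import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

final class MappedBufferQuotaSketch {
  private final Semaphore quota;
  private final ConcurrentHashMap<String, WeakReference<ByteBuffer>> cache = new ConcurrentHashMap<>();

  MappedBufferQuotaSketch(int maxBuffers) {
    this.quota = new Semaphore(maxBuffers);
  }

  ByteBuffer getOrMap(String key, Supplier<ByteBuffer> mapper) throws InterruptedException {
    WeakReference<ByteBuffer> ref = cache.get(key);
    ByteBuffer cached = (ref == null) ? null : ref.get();
    if (cached != null) {
      return cached;          // reuse an existing mapping; no extra quota needed
    }
    quota.acquire();          // wait until a mapping slot is free
    ByteBuffer mapped = mapper.get();
    cache.put(key, new WeakReference<>(mapped));
    return mapped;
  }
}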
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 256d357a31d..6dd8590bdf3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -99,9 +99,6 @@ void finalizeBlock(Container container, BlockID blockId) /** @return the threshold to read using memory mapped buffers. */ int getReadMappedBufferThreshold(); - /** @return the max count of memory mapped buffers to read. */ - int getReadMappedBufferMaxCount(); - /** * Shutdown ContainerManager. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 88aeb3c174d..26719d7f035 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -300,9 +300,9 @@ protected static void checkTableStatus(Table table, String name) /** * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no * filter is specified, then default filter used is - * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockIterator implements @@ -405,9 +405,9 @@ public void close() throws IOException { /** * Block localId Iterator for KeyValue Container. * This Block localId iterator returns localIds - * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no * filter is specified, then default filter used is - * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockLocalIdIterator implements diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java index 1be5a3819c8..4beb2075432 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java @@ -25,8 +25,7 @@ import java.io.IOException; /** - * Codec for parsing {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfoList} - * objects from data + * Codec for parsing {@link ContainerProtos.ChunkInfoList} objects from data * that may have been written using schema version one. 
Before upgrading * schema versions, deleted block IDs were stored with a duplicate copy of * their ID as the value in the database. After upgrading the code, any diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 84ddba759fe..a49cb7278a7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -167,6 +167,7 @@ public void closeContainer(final long containerId) throws IOException { * Returns the Container given a container id. * * @param containerId ID of the container + * @return Container */ public void addFinalizedBlock(final long containerId, final long localId) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 8ae838a7e53..cb7db07c24f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -483,10 +483,10 @@ public void start(String clusterId) throws IOException { replicationServer.start(); datanodeDetails.setPort(Name.REPLICATION, replicationServer.getPort()); - hddsDispatcher.init(); - hddsDispatcher.setClusterId(clusterId); writeChannel.start(); readChannel.start(); + hddsDispatcher.init(); + hddsDispatcher.setClusterId(clusterId); blockDeletingService.start(); recoveringContainerScrubbingService.start(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java index f4bf54a3d82..72fa88b35d9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java @@ -70,10 +70,6 @@ protected AbstractReplicationTask(long containerID, this.term = term; queued = Instant.now(clock); } - - protected abstract String getMetricName(); - - protected abstract String getMetricDescriptionSegment(); public long getContainerId() { return containerId; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 92ff4b6d8d6..5ceea125e81 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -26,7 +26,6 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.Set; -import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.PriorityBlockingQueue; @@ -72,17 +71,11 @@ public final class 
ReplicationSupervisor { private final StateContext context; private final Clock clock; - private final Map requestCounter = new ConcurrentHashMap<>(); - private final Map successCounter = new ConcurrentHashMap<>(); - private final Map failureCounter = new ConcurrentHashMap<>(); - private final Map timeoutCounter = new ConcurrentHashMap<>(); - private final Map skippedCounter = new ConcurrentHashMap<>(); - - private static final Map METRICS_MAP; - - static { - METRICS_MAP = new HashMap<>(); - } + private final AtomicLong requestCounter = new AtomicLong(); + private final AtomicLong successCounter = new AtomicLong(); + private final AtomicLong failureCounter = new AtomicLong(); + private final AtomicLong timeoutCounter = new AtomicLong(); + private final AtomicLong skippedCounter = new AtomicLong(); /** * A set of container IDs that are currently being downloaded @@ -195,10 +188,6 @@ public static Builder newBuilder() { return new Builder(); } - public static Map getMetricsMap() { - return Collections.unmodifiableMap(METRICS_MAP); - } - private ReplicationSupervisor(StateContext context, ExecutorService executor, ReplicationConfig replicationConfig, DatanodeConfiguration datanodeConfig, Clock clock, IntConsumer executorThreadUpdater) { @@ -232,19 +221,6 @@ public void addTask(AbstractReplicationTask task) { return; } - if (requestCounter.get(task.getMetricName()) == null) { - synchronized (this) { - if (requestCounter.get(task.getMetricName()) == null) { - requestCounter.put(task.getMetricName(), new AtomicLong(0)); - successCounter.put(task.getMetricName(), new AtomicLong(0)); - failureCounter.put(task.getMetricName(), new AtomicLong(0)); - timeoutCounter.put(task.getMetricName(), new AtomicLong(0)); - skippedCounter.put(task.getMetricName(), new AtomicLong(0)); - METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment()); - } - } - } - if (inFlight.add(task)) { if (task.getPriority() != ReplicationCommandPriority.LOW) { // Low priority tasks are not included in the replication queue sizes @@ -354,14 +330,14 @@ public TaskRunner(AbstractReplicationTask task) { @Override public void run() { try { - requestCounter.get(task.getMetricName()).incrementAndGet(); + requestCounter.incrementAndGet(); final long now = clock.millis(); final long deadline = task.getDeadline(); if (deadline > 0 && now > deadline) { LOG.info("Ignoring {} since the deadline has passed ({} < {})", this, Instant.ofEpochMilli(deadline), Instant.ofEpochMilli(now)); - timeoutCounter.get(task.getMetricName()).incrementAndGet(); + timeoutCounter.incrementAndGet(); return; } @@ -388,18 +364,18 @@ public void run() { task.runTask(); if (task.getStatus() == Status.FAILED) { LOG.warn("Failed {}", this); - failureCounter.get(task.getMetricName()).incrementAndGet(); + failureCounter.incrementAndGet(); } else if (task.getStatus() == Status.DONE) { LOG.info("Successful {}", this); - successCounter.get(task.getMetricName()).incrementAndGet(); + successCounter.incrementAndGet(); } else if (task.getStatus() == Status.SKIPPED) { LOG.info("Skipped {}", this); - skippedCounter.get(task.getMetricName()).incrementAndGet(); + skippedCounter.incrementAndGet(); } } catch (Exception e) { task.setStatus(Status.FAILED); LOG.warn("Failed {}", this, e); - failureCounter.get(task.getMetricName()).incrementAndGet(); + failureCounter.incrementAndGet(); } finally { inFlight.remove(task); decrementTaskCounter(task); @@ -443,12 +419,7 @@ public boolean equals(Object o) { } public long getReplicationRequestCount() { - return 
getCount(requestCounter); - } - - public long getReplicationRequestCount(String metricsName) { - AtomicLong counter = requestCounter.get(metricsName); - return counter != null ? counter.get() : 0; + return requestCounter.get(); } public long getQueueSize() { @@ -467,48 +438,20 @@ public long getMaxReplicationStreams() { } } - private long getCount(Map counter) { - long total = 0; - for (Map.Entry entry : counter.entrySet()) { - total += entry.getValue().get(); - } - return total; - } - public long getReplicationSuccessCount() { - return getCount(successCounter); - } - - public long getReplicationSuccessCount(String metricsName) { - AtomicLong counter = successCounter.get(metricsName); - return counter != null ? counter.get() : 0; + return successCounter.get(); } public long getReplicationFailureCount() { - return getCount(failureCounter); - } - - public long getReplicationFailureCount(String metricsName) { - AtomicLong counter = failureCounter.get(metricsName); - return counter != null ? counter.get() : 0; + return failureCounter.get(); } public long getReplicationTimeoutCount() { - return getCount(timeoutCounter); - } - - public long getReplicationTimeoutCount(String metricsName) { - AtomicLong counter = timeoutCounter.get(metricsName); - return counter != null ? counter.get() : 0; + return timeoutCounter.get(); } public long getReplicationSkippedCount() { - return getCount(skippedCounter); - } - - public long getReplicationSkippedCount(String metricsName) { - AtomicLong counter = skippedCounter.get(metricsName); - return counter != null ? counter.get() : 0; + return skippedCounter.get(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java index a1763976af9..671e985d7ad 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java @@ -71,47 +71,16 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(Interns.info("numRequestedReplications", "Number of requested replications"), supervisor.getReplicationRequestCount()) - .addGauge(Interns.info("numSuccessReplications", - "Number of successful replications"), - supervisor.getReplicationSuccessCount()) - .addGauge(Interns.info("numFailureReplications", - "Number of failure replications"), - supervisor.getReplicationFailureCount()) .addGauge(Interns.info("numTimeoutReplications", "Number of replication requests timed out before being processed"), supervisor.getReplicationTimeoutCount()) .addGauge(Interns.info("numSkippedReplications", "Number of replication requests skipped as the container is " - + "already present"), - supervisor.getReplicationSkippedCount()) + + "already present"), supervisor.getReplicationSkippedCount()) .addGauge(Interns.info("maxReplicationStreams", "Maximum number of " + "concurrent replication tasks which can run simultaneously"), supervisor.getMaxReplicationStreams()); - Map metricsMap = ReplicationSupervisor.getMetricsMap(); - if (!metricsMap.isEmpty()) { - metricsMap.forEach((metricsName, descriptionSegment) -> { - if (!metricsName.equals("")) { - builder.addGauge(Interns.info("numRequested" + metricsName, - "Number of requested " + descriptionSegment), - 
supervisor.getReplicationRequestCount(metricsName)) - .addGauge(Interns.info("numSuccess" + metricsName, - "Number of successful " + descriptionSegment), - supervisor.getReplicationSuccessCount(metricsName)) - .addGauge(Interns.info("numFailure" + metricsName, - "Number of failure " + descriptionSegment), - supervisor.getReplicationFailureCount(metricsName)) - .addGauge(Interns.info("numTimeout" + metricsName, - "Number of " + descriptionSegment + " timed out before being processed"), - supervisor.getReplicationTimeoutCount(metricsName)) - .addGauge(Interns.info("numSkipped" + metricsName, - "Number of " + descriptionSegment + " skipped as the container is " - + "already present"), - supervisor.getReplicationSkippedCount(metricsName)); - } - }); - } - Map tasks = supervisor.getInFlightReplicationSummary(); for (Map.Entry entry : tasks.entrySet()) { builder.addGauge(Interns.info("numInflight" + entry.getKey(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java index 2168f324c24..ca0ca98906c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java @@ -65,16 +65,6 @@ protected ReplicationTask( replicator); } - @Override - public String getMetricName() { - return "ContainerReplications"; - } - - @Override - public String getMetricDescriptionSegment() { - return "container replications"; - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java index e49f3c3d6e5..5fdfc931b99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java @@ -27,9 +27,9 @@ public interface StreamingSource { /** * - * @param id custom identifier + * @param id: custom identifier * - * @return map of files which should be copied (logical name -> real path) + * @return map of files which should be copied (logical name -> real path) */ Map getFilesToStream(String id) throws InterruptedException; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java index ada80c980f6..f6633cb9d37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.protocol.commands; -import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.HddsIdFactory; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; 
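The ReplicationSupervisor and ReplicationSupervisorMetrics hunks above revert the per-task-type counter maps (keyed by getMetricName()) back to single AtomicLong fields, so every task type shares one request/success/failure/timeout/skipped counter and the getters simply read them. A minimal sketch of that counter pattern, using hypothetical class and method names rather than the actual Ozone classes:

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative only: mirrors the post-revert bookkeeping in ReplicationSupervisor,
    // where one AtomicLong per outcome is shared by all task types.
    class TaskCounters {
      private final AtomicLong requestCounter = new AtomicLong();
      private final AtomicLong successCounter = new AtomicLong();
      private final AtomicLong failureCounter = new AtomicLong();

      void runTask(Runnable task) {
        requestCounter.incrementAndGet();
        try {
          task.run();
          successCounter.incrementAndGet();
        } catch (Exception e) {
          failureCounter.incrementAndGet();
        }
      }

      long getRequestCount() { return requestCounter.get(); }
      long getSuccessCount() { return successCounter.get(); }
      long getFailureCount() { return failureCounter.get(); }
    }

The metrics side follows the same simplification: with a single set of counters there is nothing to iterate over, so getMetrics() registers one fixed gauge per counter instead of one gauge per registered metric name.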
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -140,7 +140,7 @@ public String toString() { .collect(Collectors.joining(", "))).append("]") .append(", targets: ").append(getTargetDatanodes()) .append(", missingIndexes: ").append( - Arrays.toString(missingContainerIndexes.toByteArray())); + StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer())); return sb.toString(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index d6b44f2a641..eeb99b5a3db 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -122,6 +122,7 @@ private SCMDatanodeResponse submitRequest(Type type, /** * Returns SCM version. * + * @param unused - set to null and unused. * @return Version info. */ @Override diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index a3b60aa36da..219645c8edc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.UUID; @@ -44,8 +43,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -295,28 +292,4 @@ private void waitTillFinishExecution( GenericTestUtils.waitFor(() -> closeHandler.getQueuedCount() <= 0, 10, 3000); } - - @Test - public void testThreadPoolPoolSize() { - assertEquals(1, subject.getThreadPoolMaxPoolSize()); - assertEquals(0, subject.getThreadPoolActivePoolSize()); - - CloseContainerCommandHandler closeContainerCommandHandler = - new CloseContainerCommandHandler(10, 10, ""); - closeContainerCommandHandler.handle(new CloseContainerCommand( - CONTAINER_ID + 1, PipelineID.randomId()), - ozoneContainer, context, null); - closeContainerCommandHandler.handle(new CloseContainerCommand( - CONTAINER_ID + 2, PipelineID.randomId()), - ozoneContainer, context, null); - closeContainerCommandHandler.handle(new CloseContainerCommand( - CONTAINER_ID + 3, PipelineID.randomId()), - ozoneContainer, context, null); - closeContainerCommandHandler.handle(new CloseContainerCommand( - CONTAINER_ID + 
4, PipelineID.randomId()), - ozoneContainer, context, null); - assertEquals(10, closeContainerCommandHandler.getThreadPoolMaxPoolSize()); - assertTrue(closeContainerCommandHandler.getThreadPoolActivePoolSize() > 0); - } - } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java index 5ee31b97fd6..49c34828fbd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java @@ -19,14 +19,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.LinkedBlockingQueue; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -40,6 +32,7 @@ import java.time.ZoneId; import java.util.OptionalLong; +import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -70,14 +63,8 @@ public void setup() { } @Test - public void testExpiredCommandsAreNotProcessed() - throws IOException, InterruptedException { - CountDownLatch latch1 = new CountDownLatch(1); - ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); - ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( - threadFactory, latch1); - DeleteContainerCommandHandler handler = new DeleteContainerCommandHandler( - clock, executor, 100); + public void testExpiredCommandsAreNotProcessed() throws IOException { + DeleteContainerCommandHandler handler = createSubject(clock, 1000); DeleteContainerCommand command1 = new DeleteContainerCommand(1L); command1.setDeadline(clock.millis() + 10000); @@ -88,14 +75,9 @@ public void testExpiredCommandsAreNotProcessed() clock.fastForward(15000); handler.handle(command1, ozoneContainer, null, null); - latch1.await(); assertEquals(1, handler.getTimeoutCount()); - CountDownLatch latch2 = new CountDownLatch(2); - executor.setLatch(latch2); handler.handle(command2, ozoneContainer, null, null); handler.handle(command3, ozoneContainer, null, null); - latch2.await(); - assertEquals(1, handler.getTimeoutCount()); assertEquals(3, handler.getInvocationCount()); verify(controller, times(0)) @@ -107,8 +89,7 @@ public void testExpiredCommandsAreNotProcessed() } @Test - public void testCommandForCurrentTermIsExecuted() - throws IOException, InterruptedException { + public void testCommandForCurrentTermIsExecuted() throws IOException { // GIVEN DeleteContainerCommand command = new DeleteContainerCommand(1L); command.setTerm(1); @@ -116,17 +97,10 @@ public void testCommandForCurrentTermIsExecuted() 
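In the TestDeleteContainerCommandHandler changes above, the latch-coordinated ThreadPoolWithLockExecutor is dropped and the handler under test is built on Guava's newDirectExecutorService() (note the added static import), so queued work runs on the calling thread and assertions can follow handle() immediately. A small standalone sketch of why a direct executor makes such tests deterministic; the class and variable names here are illustrative, not part of the patch:

    import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.atomic.AtomicInteger;

    public class DirectExecutorDemo {
      public static void main(String[] args) {
        ExecutorService executor = newDirectExecutorService();
        AtomicInteger processed = new AtomicInteger();

        // execute() runs the task synchronously on the calling thread,
        // so the counter is already updated on the next line.
        executor.execute(processed::incrementAndGet);

        System.out.println(processed.get()); // prints 1, no latch or sleep needed
        executor.shutdown();
      }
    }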
when(context.getTermOfLeaderSCM()) .thenReturn(OptionalLong.of(command.getTerm())); - TestClock testClock = new TestClock(Instant.now(), ZoneId.systemDefault()); - CountDownLatch latch = new CountDownLatch(1); - ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); - ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( - threadFactory, latch); - DeleteContainerCommandHandler subject = new DeleteContainerCommandHandler( - testClock, executor, 100); + DeleteContainerCommandHandler subject = createSubject(); // WHEN subject.handle(command, ozoneContainer, context, null); - latch.await(); // THEN verify(controller, times(1)) @@ -189,10 +163,8 @@ private static DeleteContainerCommandHandler createSubject() { private static DeleteContainerCommandHandler createSubject( TestClock clock, int queueSize) { - ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors. - newFixedThreadPool(1, threadFactory); - return new DeleteContainerCommandHandler(clock, executor, queueSize); + return new DeleteContainerCommandHandler(clock, + newDirectExecutorService(), queueSize); } private static DeleteContainerCommandHandler createSubjectWithPoolSize( @@ -200,21 +172,4 @@ private static DeleteContainerCommandHandler createSubjectWithPoolSize( return new DeleteContainerCommandHandler(1, clock, queueSize, ""); } - static class ThreadPoolWithLockExecutor extends ThreadPoolExecutor { - private CountDownLatch countDownLatch; - ThreadPoolWithLockExecutor(ThreadFactory threadFactory, CountDownLatch latch) { - super(1, 1, 0, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), threadFactory); - this.countDownLatch = latch; - } - - void setLatch(CountDownLatch latch) { - this.countDownLatch = latch; - } - - @Override - protected void afterExecute(Runnable r, Throwable t) { - countDownLatch.countDown(); - } - } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java index 5454f9e8a9b..d04f3a5167f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; @@ -71,7 +70,6 @@ class TestChunkUtils { private static final int BUFFER_CAPACITY = 1 << 20; private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10; private static final Random RANDOM = new Random(); - private static final MappedBufferManager MAPPED_BUFFER_MANAGER = new MappedBufferManager(100); @TempDir private File tempDir; @@ -80,7 +78,7 @@ static ChunkBuffer readData(File file, long off, long len) throws StorageContainerException { LOG.info("off={}, len={}", off, len); return ChunkUtils.readData(len, BUFFER_CAPACITY, file, off, null, - MAPPED_BUFFER_THRESHOLD, true, MAPPED_BUFFER_MANAGER); + MAPPED_BUFFER_THRESHOLD); } @Test diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java index d9b95f199dd..0c373cb0dbf 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java @@ -34,13 +34,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import java.io.BufferedReader; import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; import java.nio.Buffer; import java.nio.ByteBuffer; import java.util.UUID; @@ -58,8 +53,6 @@ * Helpers for ChunkManager implementation tests. */ public abstract class AbstractTestChunkManager { - private static final Logger LOG = - LoggerFactory.getLogger(AbstractTestChunkManager.class); private HddsVolume hddsVolume; private KeyValueContainerData keyValueContainerData; @@ -135,55 +128,6 @@ protected void checkChunkFileCount(int expected) { assertEquals(expected, files.length); } - /** - * Helper method to check if a file is in use. - */ - public static boolean isFileNotInUse(String filePath) { - try { - Process process = new ProcessBuilder("fuser", filePath).start(); - try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) { - String output = reader.readLine(); // If fuser returns no output, the file is not in use - if (output == null) { - return true; - } - LOG.debug("File is in use: {}", filePath); - return false; - } finally { - process.destroy(); - } - } catch (IOException e) { - LOG.warn("Failed to check if file is in use: {}", filePath, e); - return false; // On failure, assume the file is in use - } - } - - protected boolean checkChunkFilesClosed() { - return checkChunkFilesClosed(keyValueContainerData.getChunksPath()); - } - - /** - * check that all files under chunk path are closed. - */ - public static boolean checkChunkFilesClosed(String path) { - //As in Setup, we try to create container, these paths should exist. - assertNotNull(path); - - File dir = new File(path); - assertTrue(dir.exists()); - - File[] files = dir.listFiles(); - assertNotNull(files); - for (File file : files) { - assertTrue(file.exists()); - assertTrue(file.isFile()); - // check that the file is closed. 
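The deleted AbstractTestChunkManager helpers above shelled out to fuser(1) to decide whether a chunk file was still held open after finishWriteChunks. Restated compactly for readability (simplified from the removed helper, not new functionality; it assumes fuser is available on the test host):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    // `fuser <path>` prints the PIDs of processes holding the file; no output means not in use.
    public static boolean isFileNotInUse(String filePath) {
      try {
        Process process = new ProcessBuilder("fuser", filePath).start();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
          return reader.readLine() == null;   // no output => file is not in use
        } finally {
          process.destroy();
        }
      } catch (IOException e) {
        return false;   // on failure, conservatively assume the file is in use
      }
    }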
- if (!isFileNotInUse(file.getAbsolutePath())) { - return false; - } - } - return true; - } - protected void checkWriteIOStats(long length, long opCount) { VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); assertEquals(length, volumeIOStats.getWriteBytes()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index d4a12f577e9..47d24874749 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.io.File; import java.io.IOException; @@ -40,9 +39,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.Mockito.when; /** * Common test cases for ChunkManager implementation tests. @@ -225,26 +222,4 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { checkReadIOStats(len * count, count); } - @Test - public void testFinishWrite() throws Exception { - // GIVEN - ChunkManager chunkManager = createTestSubject(); - checkChunkFileCount(0); - checkWriteIOStats(0, 0); - - chunkManager.writeChunk(getKeyValueContainer(), getBlockID(), - getChunkInfo(), getData(), - WRITE_STAGE); - - BlockData blockData = Mockito.mock(BlockData.class); - when(blockData.getBlockID()).thenReturn(getBlockID()); - - chunkManager.finishWriteChunks(getKeyValueContainer(), blockData); - assertTrue(checkChunkFilesClosed()); - - // THEN - checkChunkFileCount(1); - checkWriteIOStats(getChunkInfo().getLen(), 1); - } - } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java deleted file mode 100644 index 22406975986..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import org.junit.jupiter.api.Test; - -import java.nio.ByteBuffer; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -/** - * Test for MappedBufferManager. - */ -public class TestMappedBufferManager { - - @Test - public void testComputeIfAbsent() { - MappedBufferManager manager = new MappedBufferManager(100); - String file = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block"; - long position = 0; - int size = 1024; - ByteBuffer buffer1 = ByteBuffer.allocate(size); - ByteBuffer buffer2 = ByteBuffer.allocate(size + 1); - ByteBuffer byteBuffer1 = manager.computeIfAbsent(file, position, size, () -> buffer1); - assertEquals(buffer1, byteBuffer1); - // buffer should be reused - String file2 = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block"; - ByteBuffer byteBuffer2 = manager.computeIfAbsent(file2, position, size, () -> buffer2); - assertEquals(buffer1, byteBuffer2); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index ef37c226653..1f69db78d62 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -27,7 +27,6 @@ import java.time.Instant; import java.time.ZoneId; import java.util.List; -import java.util.SortedMap; import java.util.UUID; import java.util.concurrent.AbstractExecutorService; import java.util.concurrent.CountDownLatch; @@ -47,8 +46,6 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -58,9 +55,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo; -import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask; -import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -114,8 +109,6 
@@ public class TestReplicationSupervisor { }; private final AtomicReference replicatorRef = new AtomicReference<>(); - private final AtomicReference ecReplicatorRef = - new AtomicReference<>(); private ContainerSet set; @@ -142,7 +135,6 @@ public void setUp() throws Exception { @AfterEach public void cleanup() { replicatorRef.set(null); - ecReplicatorRef.set(null); } @ContainerLayoutTestInfo.ContainerTest @@ -402,107 +394,6 @@ public void taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) { assertEquals(0, supervisor.getReplicationSuccessCount()); } - @ContainerLayoutTestInfo.ContainerTest - public void testMultipleReplication(ContainerLayoutVersion layout, - @TempDir File tempFile) throws IOException { - this.layoutVersion = layout; - OzoneConfiguration conf = new OzoneConfiguration(); - // GIVEN - ReplicationSupervisor replicationSupervisor = - supervisorWithReplicator(FakeReplicator::new); - ReplicationSupervisor ecReconstructionSupervisor = supervisorWithECReconstruction(); - ReplicationSupervisorMetrics replicationMetrics = - ReplicationSupervisorMetrics.create(replicationSupervisor); - ReplicationSupervisorMetrics ecReconstructionMetrics = - ReplicationSupervisorMetrics.create(ecReconstructionSupervisor); - try { - //WHEN - replicationSupervisor.addTask(createTask(1L)); - ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(2L)); - replicationSupervisor.addTask(createTask(1L)); - replicationSupervisor.addTask(createTask(3L)); - ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(4L)); - - SimpleContainerDownloader moc = mock(SimpleContainerDownloader.class); - Path res = Paths.get("file:/tmp/no-such-file"); - when(moc.getContainerDataFromReplicas(anyLong(), anyList(), - any(Path.class), any())).thenReturn(res); - - final String testDir = tempFile.getPath(); - MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); - when(volumeSet.getVolumesList()).thenReturn(singletonList( - new HddsVolume.Builder(testDir).conf(conf).build())); - ContainerController mockedCC = mock(ContainerController.class); - ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet); - ContainerReplicator replicator = new DownloadAndImportReplicator( - conf, set, importer, moc); - replicatorRef.set(replicator); - replicationSupervisor.addTask(createTask(5L)); - - ReplicateContainerCommand cmd1 = createCommand(6L); - cmd1.setDeadline(clock.millis() + 10000); - ReplicationTask task1 = new ReplicationTask(cmd1, replicatorRef.get()); - clock.fastForward(15000); - replicationSupervisor.addTask(task1); - - ReconstructECContainersCommand cmd2 = createReconstructionCmd(7L); - cmd2.setDeadline(clock.millis() + 10000); - ECReconstructionCoordinatorTask task2 = new ECReconstructionCoordinatorTask( - ecReplicatorRef.get(), new ECReconstructionCommandInfo(cmd2)); - clock.fastForward(15000); - ecReconstructionSupervisor.addTask(task2); - ecReconstructionSupervisor.addTask(createECTask(8L)); - ecReconstructionSupervisor.addTask(createECTask(9L)); - - //THEN - assertEquals(2, replicationSupervisor.getReplicationSuccessCount()); - assertEquals(2, replicationSupervisor.getReplicationSuccessCount( - task1.getMetricName())); - assertEquals(1, replicationSupervisor.getReplicationFailureCount()); - assertEquals(1, replicationSupervisor.getReplicationFailureCount( - task1.getMetricName())); - assertEquals(1, replicationSupervisor.getReplicationSkippedCount()); - assertEquals(1, replicationSupervisor.getReplicationSkippedCount( - task1.getMetricName())); - assertEquals(1, 
replicationSupervisor.getReplicationTimeoutCount()); - assertEquals(1, replicationSupervisor.getReplicationTimeoutCount( - task1.getMetricName())); - assertEquals(5, replicationSupervisor.getReplicationRequestCount()); - assertEquals(5, replicationSupervisor.getReplicationRequestCount( - task1.getMetricName())); - assertEquals(0, replicationSupervisor.getReplicationRequestCount( - task2.getMetricName())); - - assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount()); - assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount( - task2.getMetricName())); - assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount()); - assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount( - task2.getMetricName())); - assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount()); - assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount( - task2.getMetricName())); - assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount()); - assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount( - task2.getMetricName())); - assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount( - task1.getMetricName())); - - MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl(); - replicationMetrics.getMetrics(replicationMetricsCollector, true); - assertEquals(1, replicationMetricsCollector.getRecords().size()); - - MetricsCollectorImpl ecReconstructionMetricsCollector = new MetricsCollectorImpl(); - ecReconstructionMetrics.getMetrics(ecReconstructionMetricsCollector, true); - assertEquals(1, ecReconstructionMetricsCollector.getRecords().size()); - } finally { - replicationMetrics.unRegister(); - ecReconstructionMetrics.unRegister(); - replicationSupervisor.stop(); - ecReconstructionSupervisor.stop(); - } - } - @ContainerLayoutTestInfo.ContainerTest public void testPriorityOrdering(ContainerLayoutVersion layout) throws InterruptedException { @@ -585,16 +476,6 @@ private static class BlockingTask extends AbstractReplicationTask { this.waitForCompleteLatch = waitForCompletion; } - @Override - protected String getMetricName() { - return "Blockings"; - } - - @Override - protected String getMetricDescriptionSegment() { - return "blockings"; - } - @Override public void runTask() { runningLatch.countDown(); @@ -621,16 +502,6 @@ private static class OrderedTask extends AbstractReplicationTask { setPriority(priority); } - @Override - protected String getMetricName() { - return "Ordereds"; - } - - @Override - protected String getMetricDescriptionSegment() { - return "ordereds"; - } - @Override public void runTask() { completeList.add(name); @@ -660,22 +531,6 @@ private ReplicationSupervisor supervisorWith( return supervisor; } - private ReplicationSupervisor supervisorWithECReconstruction() throws IOException { - ConfigurationSource conf = new OzoneConfiguration(); - ExecutorService executor = newDirectExecutorService(); - ReplicationServer.ReplicationConfig repConf = - conf.getObject(ReplicationServer.ReplicationConfig.class); - ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() - .stateContext(context).replicationConfig(repConf).executor(executor) - .clock(clock).build(); - - FakeECReconstructionCoordinator coordinator = new FakeECReconstructionCoordinator( - new OzoneConfiguration(), null, null, context, - ECReconstructionMetrics.create(), "", supervisor); - ecReplicatorRef.set(coordinator); - return supervisor; - } - private ReplicationTask createTask(long containerId) { 
ReplicateContainerCommand cmd = createCommand(containerId); return new ReplicationTask(cmd, replicatorRef.get()); @@ -683,13 +538,7 @@ private ReplicationTask createTask(long containerId) { private ECReconstructionCoordinatorTask createECTask(long containerId) { return new ECReconstructionCoordinatorTask(null, - createReconstructionCmdInfo(containerId)); - } - - private ECReconstructionCoordinatorTask createECTaskWithCoordinator(long containerId) { - ECReconstructionCommandInfo ecReconstructionCommandInfo = createReconstructionCmdInfo(containerId); - return new ECReconstructionCoordinatorTask(ecReplicatorRef.get(), - ecReconstructionCommandInfo); + createReconstructionCmd(containerId)); } private static ReplicateContainerCommand createCommand(long containerId) { @@ -699,20 +548,18 @@ private static ReplicateContainerCommand createCommand(long containerId) { return cmd; } - private static ECReconstructionCommandInfo createReconstructionCmdInfo( + private static ECReconstructionCommandInfo createReconstructionCmd( long containerId) { - return new ECReconstructionCommandInfo(createReconstructionCmd(containerId)); - } - - private static ReconstructECContainersCommand createReconstructionCmd( - long containerId) { - List sources = - new ArrayList<>(); - sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( - MockDatanodeDetails.randomDatanodeDetails(), 1)); - sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( + List sources + = new ArrayList<>(); + sources.add(new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex( + MockDatanodeDetails.randomDatanodeDetails(), 1)); + sources.add(new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 2)); - sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( + sources.add(new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 3)); byte[] missingIndexes = new byte[1]; @@ -720,44 +567,14 @@ private static ReconstructECContainersCommand createReconstructionCmd( List target = singletonList( MockDatanodeDetails.randomDatanodeDetails()); - ReconstructECContainersCommand cmd = new ReconstructECContainersCommand(containerId, sources, target, - Proto2Utils.unsafeByteString(missingIndexes), - new ECReplicationConfig(3, 2)); - cmd.setTerm(CURRENT_TERM); - return cmd; - } - - /** - * A fake coordinator that simulates successful reconstruction of ec containers. 
- */ - private class FakeECReconstructionCoordinator extends ECReconstructionCoordinator { - - private final OzoneConfiguration conf = new OzoneConfiguration(); - private final ReplicationSupervisor supervisor; - - FakeECReconstructionCoordinator(ConfigurationSource conf, - CertificateClient certificateClient, SecretKeySignerClient secretKeyClient, - StateContext context, ECReconstructionMetrics metrics, String threadNamePrefix, - ReplicationSupervisor supervisor) - throws IOException { - super(conf, certificateClient, secretKeyClient, context, metrics, threadNamePrefix); - this.supervisor = supervisor; - } - - @Override - public void reconstructECContainerGroup(long containerID, - ECReplicationConfig repConfig, SortedMap sourceNodeMap, - SortedMap targetNodeMap) { - assertEquals(1, supervisor.getTotalInFlightReplications()); - - KeyValueContainerData kvcd = new KeyValueContainerData( - containerID, layoutVersion, 100L, - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); - assertDoesNotThrow(() -> { - set.addContainer(kvc); - }); - } + ReconstructECContainersCommand cmd = + new ReconstructECContainersCommand(containerId, + sources, + target, + Proto2Utils.unsafeByteString(missingIndexes), + new ECReplicationConfig(3, 2)); + + return new ECReconstructionCommandInfo(cmd); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java index 519a24a2a5c..f4e4ec6a253 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java @@ -26,12 +26,10 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -55,8 +53,11 @@ public void testExceptionIfSourceAndMissingNotSameLength() { @Test public void protobufConversion() { - byte[] missingIndexes = {1, 2}; - final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); + final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2}); + List srcNodesIndexes = new ArrayList<>(); + for (int i = 0; i < srcNodesIndexes.size(); i++) { + srcNodesIndexes.add(i + 1L); + } ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); final List dnDetails = getDNDetails(5); @@ -69,10 +70,6 @@ public void protobufConversion() { ReconstructECContainersCommand reconstructECContainersCommand = new ReconstructECContainersCommand(1L, sources, targets, missingContainerIndexes, ecReplicationConfig); - - assertThat(reconstructECContainersCommand.toString()) - .contains("missingIndexes: " + Arrays.toString(missingIndexes)); - StorageContainerDatanodeProtocolProtos.ReconstructECContainersCommandProto proto = reconstructECContainersCommand.getProto(); diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index 288085ef948..3a69c793c26 100644 --- 
a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -88,7 +88,6 @@ - diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md index cf246712f68..47c09a798fc 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.md @@ -76,15 +76,3 @@ blocks that get reported. That is a 40x reduction in the block reports. This extra indirection helps tremendously with scaling Ozone. SCM has far less block data to process and the namespace service (Ozone Manager) as a different service are critical to scaling Ozone. - - -## Notable configurations - -key | default |

    description
    -----|---------|------------ -dfs.container.ratis.datanode.storage.dir | none | This directory is used for storing Ratis metadata like logs. -ozone.scm.datanode.id.dir | none | The path that datanodes will use to store the datanode ID. -hdds.datanode.dir | none | Determines where HDDS data will be stored on the local filesystem. -hdds.datanode.dir.du.reserved | none | Reserved space in bytes per volume. Always leave this much space free for non dfs use. -ozone.metadata.dirs | none | Directory to store persisted data (RocksDB). -ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. Use to connect Recon. diff --git a/hadoop-hdds/docs/content/concept/Datanodes.zh.md b/hadoop-hdds/docs/content/concept/Datanodes.zh.md index 32071c9e51e..8f129df7b9b 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.zh.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.zh.md @@ -49,15 +49,3 @@ Ozone 的存储容器是一个自包含的超级块,容器中包含一系列 SCM 如何获得容器的位置?这一点和现有的 HDFS 十分相似。数据节点会定期发送类似于块报告的容器报告,容器报告比块报告的内容简洁的多,比如,对于一个存储容量为 196 TB 的集群,Ozone 大概会拥有四万个容器,相比于 HDFS 的一百五十万个块,块报告数量缩减为四十分之一。 这种间接管理的方式大大地提高了 Ozone 的扩展性,因为 SCM 需要处理的块数据大大减少,且命名服务(OM)作为一个独特的服务主体对于扩展 Ozone 具有重要意义。 - - -## 需要关注的配置项 - -配置项 |默认值 |
    描述
    -----|---------|------------ -dfs.container.ratis.datanode.storage.dir | none | 该目录用于存储 Ratis 元数据,如日志。 -ozone.scm.datanode.id.dir | none | 数据节点上用于存储数据节点 ID 的路径。 -hdds.datanode.dir | none | 此配置决定数据节点上的数据将存储在本地文件系统的哪个位置。 -hdds.datanode.dir.du.reserved | none | 每个卷保留的存储空间(以字节为单位)。始终为非DFS用途保留这么多空闲空间。 -ozone.metadata.dirs | none | 用于存储持久化数据(RocksDB)的目录。 -ozone.recon.address | 0.0.0.0:9891 | Recon的RPC地址。 使用 连接到Recon。 \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java index fde555208b3..5b283c3a1a3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java @@ -31,6 +31,23 @@ @ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "." + GrpcConfigKeys.PREFIX) public class DatanodeRatisGrpcConfig { + @Config(key = "message.size.max", + defaultValue = "32MB", + type = ConfigType.SIZE, + tags = {OZONE, CLIENT, PERFORMANCE}, + description = "Maximum message size allowed to be received by Grpc " + + "Channel (Server)." + ) + private int maximumMessageSize = 32 * 1024 * 1024; + + public int getMaximumMessageSize() { + return maximumMessageSize; + } + + public void setMaximumMessageSize(int maximumMessageSize) { + this.maximumMessageSize = maximumMessageSize; + } + @Config(key = "flow.control.window", defaultValue = "5MB", type = ConfigType.SIZE, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java index 0cb39482e98..cbb4f3fc2ee 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java @@ -118,6 +118,7 @@ String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails, /** * Get Root CA certificate. + * @return * @throws IOException */ String getRootCACertificate() throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index 71918308f14..a938d53c7c4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -330,6 +330,7 @@ public SCMGetCertResponseProto getCACert() throws IOException { * @param role - node type: OM/SCM/DN. * @param startSerialId - start cert serial id. * @param count - max number of certificates returned in a batch. 
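Among the hunks above, DatanodeRatisGrpcConfig gets back a message.size.max setting declared with Ozone's @Config annotation inside a @ConfigGroup. Annotation-declared groups like this are materialized from the configuration with getObject(), the same pattern this patch uses elsewhere for ReplicationServer.ReplicationConfig. A brief sketch of reading the value (illustrative usage, not code from the patch):

    // The effective key is the group prefix plus "message.size.max";
    // the 32MB default comes from the @Config annotation shown in the hunk.
    OzoneConfiguration conf = new OzoneConfiguration();
    DatanodeRatisGrpcConfig grpcConfig = conf.getObject(DatanodeRatisGrpcConfig.class);
    int maxMessageSize = grpcConfig.getMaximumMessageSize();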
+ * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index d9b198d4b14..1f114304cca 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -307,7 +307,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException { } /** * Sort the datanodes based on distance from client. - * @return list of datanodes; + * @return List * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index da651160d04..e7e029f7087 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -50,7 +50,8 @@ void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException; - /** Same as {@link #verify}, but with encoded token. */ + /** Same as {@link #verify(Token, + * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. */ default void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) throws SCMSecurityException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java index 5a39d0f1dd0..b2d62443b77 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java @@ -104,7 +104,7 @@ public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) { * @param certSerialId - the new certificate id. * @return Signed Certificate. * @throws IOException - On Error - * @throws CertificateException - on Error. + * @throws OperatorCreationException - on Error. */ @SuppressWarnings("ParameterNumber") @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index 118aa826013..a93bdb4e3d6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -195,6 +195,8 @@ public CertPath getCaCertPath() * * @param certSerialId - Certificate for this CA. * @return X509Certificate + * @throws CertificateException - usually thrown if this CA is not + * initialized. * @throws IOException - on Error. 
*/ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 42292b9663f..70a475982bd 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -73,7 +73,6 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -668,8 +667,6 @@ protected enum InitCase { * certificate. * * Truth table: - *
    -   * {@code
        *  +--------------+---------------+--------------+---------------------+
        *  | Private Key  | Public Keys   | Certificate  |   Result            |
        *  +--------------+---------------+--------------+---------------------+
    @@ -682,8 +679,7 @@ protected enum InitCase {
        *  | True   (1)   | True    (1)   | False  (0)   |   GETCERT->SUCCESS  |
        *  | True   (1)   | True    (1)   | True   (1)   |   SUCCESS           |
        *  +--------------+-----------------+--------------+----------------+
    -   * }
    -   * 
    + * * Success in following cases: * 1. If keypair as well certificate is available. * 2. If private key and certificate is available and public key is @@ -987,6 +983,43 @@ public Set getAllCaCerts() { return certs; } + @Override + public List getCAList() { + pemEncodedCACertsLock.lock(); + try { + return pemEncodedCACerts; + } finally { + pemEncodedCACertsLock.unlock(); + } + } + + public List listCA() throws IOException { + pemEncodedCACertsLock.lock(); + try { + if (pemEncodedCACerts == null) { + updateCAList(); + } + return pemEncodedCACerts; + } finally { + pemEncodedCACertsLock.unlock(); + } + } + + @Override + public List updateCAList() throws IOException { + pemEncodedCACertsLock.lock(); + try { + pemEncodedCACerts = getScmSecureClient().listCACertificate(); + return pemEncodedCACerts; + } catch (Exception e) { + getLogger().error("Error during updating CA list", e); + throw new CertificateException("Error during updating CA list", e, + CERTIFICATE_ERROR); + } finally { + pemEncodedCACertsLock.unlock(); + } + } + @Override public ReloadingX509TrustManager getTrustManager() throws CertificateException { try { @@ -1016,20 +1049,8 @@ public ReloadingX509KeyManager getKeyManager() throws CertificateException { } } - @Override - public ClientTrustManager createClientTrustManager() throws IOException { - CACertificateProvider caCertificateProvider = () -> { - List caCerts = new ArrayList<>(); - caCerts.addAll(getAllCaCerts()); - caCerts.addAll(getAllRootCaCerts()); - return caCerts; - }; - return new ClientTrustManager(caCertificateProvider, caCertificateProvider); - } - /** * Register a receiver that will be called after the certificate renewed. - * * @param receiver */ @Override @@ -1086,7 +1107,7 @@ public Duration timeBeforeExpiryGracePeriod(X509Certificate certificate) { * Renew keys and certificate. Save the keys are certificate to disk in new * directories, swap the current key directory and certs directory with the * new directories. - * @param force check certificate expiry time again if force is false. + * @param force, check certificate expiry time again if force is false. * @return String, new certificate ID * */ public String renewAndStoreKeyAndCertificate(boolean force) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 9d037fed6bc..f27f42e0b4c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -123,8 +123,8 @@ /** * Create a Jetty embedded server to answer http requests. The primary goal is * to serve up status information for the server. There are three contexts: - * "/logs/" -> points to the log directory "/static/" -> points to common static - * files (src/webapps/static) "/" -> the jsp server code from + * "/logs/" -> points to the log directory "/static/" -> points to common static + * files (src/webapps/static) "/" -> the jsp server code from * (src/webapps/) * * This class is a fork of the old HttpServer. 
HttpServer exists for diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index bceec92c6c8..f4f188aaf39 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -41,12 +41,11 @@ import org.slf4j.LoggerFactory; /** - *
      * Servlet that runs async-profiler as web-endpoint.
    - *
    + * 

    * Source: https://github.com/apache/hive/blob/master/common/src/java/org * /apache/hive/http/ProfileServlet.java - * + *

    * Following options from async-profiler can be specified as query parameter. * // -e event profiling event: cpu|alloc|lock|cache-misses etc. * // -d duration run profiling for seconds @@ -80,7 +79,7 @@ * curl "http://localhost:10002/prof" * - To collect 1 minute CPU profile of current process and output in tree * format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" + * curl "http://localhost:10002/prof?output=tree&duration=60" * - To collect 30 second heap allocation profile of current process (returns * FlameGraph svg) * curl "http://localhost:10002/prof?event=alloc" @@ -112,7 +111,6 @@ * The default output format of the newest async profiler is HTML. * If the user is using an older version such as 1.5, HTML is not supported. * Please specify the corresponding output format. - *

    */ public class ProfileServlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java index 535a5e6c8e9..0d01aa43b42 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java @@ -24,6 +24,8 @@ import java.io.IOException; import java.io.PrintWriter; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; + import io.prometheus.client.CollectorRegistry; import io.prometheus.client.exporter.common.TextFormat; @@ -56,6 +58,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) return; } } + DefaultMetricsSystem.instance().publishMetricsNow(); PrintWriter writer = resp.getWriter(); getPrometheusSink().writeMetrics(writer); writer.write("\n\n#Dropwizard metrics\n\n"); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java index 0dc244bdbc7..342a0400cbd 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.scm.proxy.SCMClientConfig; import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; @@ -371,6 +373,80 @@ public static List getExistingSstFiles(File db) throws IOException { return sstList; } + /** + * Build CA list which need to be passed to client. + * + * If certificate client is null, obtain the list of CA using SCM security + * client, else it uses certificate client. + * @return list of CA + */ + public static List buildCAList(CertificateClient certClient, + ConfigurationSource configuration) throws IOException { + long waitDuration = + configuration.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, + OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); + if (certClient != null) { + if (!SCMHAUtils.isSCMHAEnabled(configuration)) { + return generateCAList(certClient); + } else { + Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); + int expectedCount = scmNodes.size() + 1; + if (scmNodes.size() > 1) { + // First check if cert client has ca list initialized. + // This is being done, when this method is called multiple times we + // don't make call to SCM, we return from in-memory. 
+ List caCertPemList = certClient.getCAList(); + if (caCertPemList != null && caCertPemList.size() == expectedCount) { + return caCertPemList; + } + return getCAListWithRetry(() -> + waitForCACerts(certClient::updateCAList, expectedCount), + waitDuration); + } else { + return generateCAList(certClient); + } + } + } else { + SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = + HddsServerUtil.getScmSecurityClient(configuration); + if (!SCMHAUtils.isSCMHAEnabled(configuration)) { + List caCertPemList = new ArrayList<>(); + SCMGetCertResponseProto scmGetCertResponseProto = + scmSecurityProtocolClient.getCACert(); + if (scmGetCertResponseProto.hasX509Certificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); + } + if (scmGetCertResponseProto.hasX509RootCACertificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); + } + return caCertPemList; + } else { + Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); + int expectedCount = scmNodes.size() + 1; + if (scmNodes.size() > 1) { + return getCAListWithRetry(() -> waitForCACerts( + scmSecurityProtocolClient::listCACertificate, + expectedCount), waitDuration); + } else { + return scmSecurityProtocolClient.listCACertificate(); + } + } + } + } + + private static List generateCAList(CertificateClient certClient) + throws IOException { + List caCertPemList = new ArrayList<>(); + for (X509Certificate cert : certClient.getAllRootCaCerts()) { + caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); + } + for (X509Certificate cert : certClient.getAllCaCerts()) { + caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); + } + return caCertPemList; + } + + /** * Retry forever until CA list matches expected count. * @param task - task to get CA list. @@ -412,37 +488,23 @@ private static List waitForCACerts( * Build CA List in the format of X509Certificate. * If certificate client is null, obtain the list of CA using SCM * security client, else it uses certificate client. - * * @return list of CA X509Certificates. 
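A self-contained sketch of the wait-until-expected-count pattern that buildCAList relies on above: in an HA cluster the expected size is the number of SCM nodes plus one root CA certificate, and the list is re-fetched on an interval until that count is reached. The supplier, interval and node count here are illustrative stand-ins for certClient::updateCAList / listCACertificate, OZONE_SCM_CA_LIST_RETRY_INTERVAL and scmNodes.size() + 1.

```java
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/** Sketch: poll a CA-list supplier until it reports the expected number of certificates. */
public final class CaListWaitSketch {

  /**
   * Retry (as the production helper does) until the supplier returns expectedCount
   * PEM-encoded certificates, sleeping retryIntervalSeconds between attempts.
   */
  static List<String> waitForCaCerts(Callable<List<String>> supplier,
                                     int expectedCount,
                                     long retryIntervalSeconds) throws Exception {
    while (true) {
      List<String> certs = supplier.call();
      if (certs != null && certs.size() == expectedCount) {
        return certs;
      }
      TimeUnit.SECONDS.sleep(retryIntervalSeconds);
    }
  }

  public static void main(String[] args) throws Exception {
    // Illustrative only: 3 SCM nodes are expected to expose 3 sub-CA certs plus 1 root CA cert.
    int scmNodeCount = 3;
    int expectedCount = scmNodeCount + 1;
    List<String> cas = waitForCaCerts(
        () -> List.of("pem-1", "pem-2", "pem-3", "pem-4"),  // stand-in supplier
        expectedCount, 10);
    System.out.println("CA list size: " + cas.size());
  }
}
```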
*/ - public static List buildCAX509List(ConfigurationSource conf) throws IOException { - long waitDuration = - conf.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, - OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); - Collection scmNodes = SCMHAUtils.getSCMNodeIds(conf); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = - HddsServerUtil.getScmSecurityClient(conf); - if (!SCMHAUtils.isSCMHAEnabled(conf)) { - List caCertPemList = new ArrayList<>(); - SCMGetCertResponseProto scmGetCertResponseProto = - scmSecurityProtocolClient.getCACert(); - if (scmGetCertResponseProto.hasX509Certificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); - } - if (scmGetCertResponseProto.hasX509RootCACertificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); - } - return OzoneSecurityUtil.convertToX509(caCertPemList); - } else { - int expectedCount = scmNodes.size() + 1; - if (scmNodes.size() > 1) { - return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts( - scmSecurityProtocolClient::listCACertificate, - expectedCount), waitDuration)); - } else { - return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate()); + public static List buildCAX509List( + CertificateClient certClient, + ConfigurationSource conf) throws IOException { + if (certClient != null) { + // Do this here to avoid extra conversion of X509 to pem and again to + // X509 by buildCAList. + if (!SCMHAUtils.isSCMHAEnabled(conf)) { + List x509Certificates = new ArrayList<>(); + x509Certificates.addAll(certClient.getAllCaCerts()); + x509Certificates.addAll(certClient.getAllRootCaCerts()); + return x509Certificates; } } + List pemEncodedCerts = HAUtils.buildCAList(certClient, conf); + return OzoneSecurityUtil.convertToX509(pemEncodedCerts); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 94e9dceb6a7..c45e772c241 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -125,11 +125,11 @@ private HddsServerUtil() { HddsServerUtil.class); /** - * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}. + * Add protobuf-based protocol to the {@link RPC.Server}. 
* @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol - * @param server RPC server to which the protocol and implementation is added to + * @param server RPC server to which the protocol & implementation is added to */ public static void addPBProtocol(Configuration conf, Class protocol, BlockingService service, RPC.Server server) throws IOException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index 29531f31518..e7c4ec4ce3d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Objects; -import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.StringCodec; @@ -163,15 +162,7 @@ public String toString() { */ public static TransactionInfo readTransactionInfo( DBStoreHAManager metadataManager) throws IOException { - return metadataManager.getTransactionInfoTable().getSkipCache(TRANSACTION_INFO_KEY); - } - - public ByteString toByteString() throws IOException { - return ByteString.copyFrom(getCodec().toPersistedFormat(this)); - } - - public static TransactionInfo fromByteString(ByteString byteString) throws IOException { - return byteString == null ? null : getCodec().fromPersistedFormat(byteString.toByteArray()); + return metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY); } public SnapshotInfo toSnapshotInfo() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java index 8623a3bdd7d..3e8ea30a652 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java @@ -184,7 +184,7 @@ void move(KEY sourceKey, KEY destKey, VALUE value, /** * Get List of Index to Table Names. 
* (For decoding table from column family index) - * @return Map of Index -> TableName + * @return Map of Index -> TableName */ Map getTableNames(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java index 015cd10b8b9..c47b176e93b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java @@ -49,7 +49,7 @@ public static boolean waitForCheckpointDirectoryExist(File file, final boolean success = RatisHelper.attemptUntilTrue(file::exists, POLL_INTERVAL_DURATION, maxWaitTimeout); if (!success) { LOG.info("Checkpoint directory: {} didn't get created in {} secs.", - file.getAbsolutePath(), maxWaitTimeout.getSeconds()); + maxWaitTimeout.getSeconds(), file.getAbsolutePath()); } return success; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c156b8e4d67..c441ec929c7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -841,7 +841,7 @@ private int getLastLevel() throws IOException { /** * Deletes sst files which do not correspond to prefix * for given table. - * @param prefixPairs a map of TableName to prefixUsed. + * @param prefixPairs, a map of TableName to prefixUsed. */ public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws IOException, RocksDBException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index c7055267052..c818c07b1ac 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -170,7 +169,7 @@ default VALUE getReadCopy(KEY key) throws IOException { /** * Returns a prefixed iterator for this metadata store. * @param prefix - * @return MetaStoreIterator + * @return */ TableIterator> iterator(KEY prefix) throws IOException; @@ -246,7 +245,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { /** * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} + * startKey or count. Further a {@link MetadataKeyFilters.MetadataKeyFilter} * can be added to * filter keys if necessary. * To prevent race conditions while listing * entries, this implementation takes a snapshot and lists the entries from @@ -262,7 +261,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * the value for count must be an integer greater than 0. *

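A small sketch of the startKey/count/prefix contract described above (and the filter behaviour described just below), backed by an in-memory sorted map rather than the real Table/RocksDB implementation; the key type, filter shape and helper names are hypothetical.

```java
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.function.Predicate;

/** Sketch: list at most count entries at or after startKey whose key passes every filter. */
public final class RangeListingSketch {

  static List<Map.Entry<String, String>> getRangeKVs(
      NavigableMap<String, String> table, String startKey, int count,
      List<Predicate<String>> filters) {
    if (count <= 0) {
      throw new IllegalArgumentException("count must be an integer greater than 0");
    }
    List<Map.Entry<String, String>> result = new ArrayList<>();
    for (Map.Entry<String, String> e : table.tailMap(startKey, true).entrySet()) {
      if (result.size() >= count) {
        break;
      }
      if (filters.stream().allMatch(f -> f.test(e.getKey()))) {
        result.add(new SimpleEntry<>(e.getKey(), e.getValue()));
      }
    }
    return result;
  }

  public static void main(String[] args) {
    NavigableMap<String, String> table = new TreeMap<>();
    table.put("/vol1/bucketA/key1", "v1");
    table.put("/vol1/bucketA/key2", "v2");
    table.put("/vol1/bucketB/key3", "v3");
    // List up to 10 keys under bucketA, starting from the bucket prefix.
    List<Map.Entry<String, String>> kvs = getRangeKVs(
        table, "/vol1/bucketA/", 10,
        List.<Predicate<String>>of(k -> k.startsWith("/vol1/bucketA/")));
    kvs.forEach(e -> System.out.println(e.getKey() + " = " + e.getValue()));
  }
}
```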
    * This method allows to specify one or more - * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} + * {@link MetadataKeyFilters.MetadataKeyFilter} * to filter keys by certain condition. Once given, only the entries * whose key passes all the filters will be included in the result. * @@ -270,7 +269,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. + * {@link MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database or an empty list if the * startKey is invalid. * @throws IOException if there are I/O errors. @@ -293,7 +292,7 @@ List> getRangeKVs(KEY startKey, * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. + * {@link MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database. * @throws IOException * @throws IllegalArgumentException @@ -308,6 +307,7 @@ List> getSequentialRangeKVs(KEY startKey, * as part of a batch operation. * @param batch * @param prefix + * @return */ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws IOException; @@ -354,24 +354,6 @@ public V getValue() { public String toString() { return "(key=" + key + ", value=" + value + ")"; } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof KeyValue)) { - return false; - } - KeyValue kv = (KeyValue) obj; - try { - return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public int hashCode() { - return Objects.hash(getKey(), getValue()); - } }; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java index c428f2860ee..0c1ec710d2c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java @@ -48,7 +48,7 @@ * This interface must be implemented by entities requiring audit logging. * For example - OMVolumeArgs, OMBucketArgs. * The implementing class must override toAuditMap() to return an - * instance of {@code Map} where both Key and Value are String. + * instance of Map where both Key and Value are String. 
* * Key: must contain printable US ASCII characters * May not contain a space, =, ], or " diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css index 389d9d78f21..e08e9c52060 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css @@ -91,7 +91,3 @@ body { .om-roles-background { background-color: #dcfbcd!important; } - -.scm-roles-background { - background-color: #dcfbcd!important; -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html index 9706ebdf6b3..c1f7d16aefa 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html @@ -21,6 +21,6 @@ Input arguments: -

    {{$ctrl.jmx.InputArguments.join('\n')}}
    + {{$ctrl.jmx.InputArguments}} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java index 94ef86650c4..fa784b75538 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java @@ -48,7 +48,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; @@ -258,6 +257,16 @@ public Set getAllCaCerts() { return rootCerts; } + @Override + public List getCAList() { + return null; + } + + @Override + public List updateCAList() throws IOException { + return null; + } + public void renewRootCA() throws Exception { LocalDateTime start = LocalDateTime.now(); Duration rootCACertDuration = securityConfig.getMaxCertificateDuration(); @@ -355,17 +364,6 @@ public ReloadingX509TrustManager getTrustManager() throws CertificateException { } } - @Override - public ClientTrustManager createClientTrustManager() throws IOException { - CACertificateProvider caCertificateProvider = () -> { - List caCerts = new ArrayList<>(); - caCerts.addAll(getAllCaCerts()); - caCerts.addAll(getAllRootCaCerts()); - return caCerts; - }; - return new ClientTrustManager(caCertificateProvider, caCertificateProvider); - } - @Override public void registerNotificationReceiver(CertificateNotification receiver) { synchronized (notificationReceivers) { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index e25d85e1957..ee5c0d9cc5a 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -191,7 +191,6 @@ message DatanodeUsageInfoProto { optional int64 containerCount = 5; optional int64 committed = 6; optional int64 freeSpaceToSpare = 7; - optional int64 pipelineCount = 8; } /** diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 87d76158301..a863fe3ef5d 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -247,30 +247,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - org.apache.ozone:ozone-dev-support:${ozone.version} - - - - - org.apache.ozone - ozone-dev-support - ${ozone.version} - - - - - - process - - - - diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index d93933dee36..ce424c930e1 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -36,8 +36,6 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - /** * Class to load Native Libraries. 
*/ @@ -69,10 +67,6 @@ public static NativeLibraryLoader getInstance() { return instance; } - public static String getJniLibraryFileName() { - return appendLibOsSuffix("lib" + ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - public static String getJniLibraryFileName(String libraryName) { return appendLibOsSuffix("lib" + libraryName); } @@ -105,12 +99,9 @@ private static String appendLibOsSuffix(String libraryFileName) { return libraryFileName + getLibOsSuffix(); } - public static boolean isLibraryLoaded() { - return isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - public static boolean isLibraryLoaded(final String libraryName) { - return getInstance().librariesLoaded.getOrDefault(libraryName, false); + return getInstance().librariesLoaded + .getOrDefault(libraryName, false); } public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java index 05eb32722e7..a792e2cea6b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java @@ -73,7 +73,7 @@ ContainerPlacementStatus validateContainerPlacement( * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas Map of replicas with value signifying if + * @param replicas: Map of replicas with value signifying if * replica can be copied */ Set replicasToCopyToFixMisreplication( @@ -82,8 +82,8 @@ Set replicasToCopyToFixMisreplication( /** * Given a set of replicas of a container which are overreplicated, * return a set of replicas to delete to fix overreplication. - * @param replicas Set of existing replicas of the container - * @param expectedCountPerUniqueReplica Replication factor of each + * @param replicas: Set of existing replicas of the container + * @param expectedCountPerUniqueReplica: Replication factor of each * unique replica */ Set replicasToRemoveToFixOverreplication( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 2a1c6fce0c0..471a9479412 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -525,7 +525,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas Map of replicas with value signifying if + * @param replicas: Map of replicas with value signifying if * replica can be copied */ @Override @@ -582,7 +582,7 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * replication is computed. * The algorithm starts with creating a replicaIdMap which contains the * replicas grouped by replica Index. A placementGroup Map is created which - * groups replicas based on their rack and the replicas within the rack + * groups replicas based on their rack & the replicas within the rack * are further grouped based on the replica Index. 
* A placement Group Count Map is created which keeps * track of the count of replicas in each rack. @@ -590,13 +590,13 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * order based on their current replication factor in a descending factor. * For each replica Index the replica is removed from the rack which contains * the most replicas, in order to achieve this the racks are put - * into priority queue and are based on the number of replicas they have. - * The replica is removed from the rack with maximum replicas and the replica - * to be removed is also removed from the maps created above and + * into priority queue & are based on the number of replicas they have. + * The replica is removed from the rack with maximum replicas & the replica + * to be removed is also removed from the maps created above & * the count for rack is reduced. * The set of replicas computed are then returned by the function. - * @param replicas Set of existing replicas of the container - * @param expectedCountPerUniqueReplica Replication factor of each + * @param replicas: Set of existing replicas of the container + * @param expectedCountPerUniqueReplica: Replication factor of each * * unique replica * @return Set of replicas to be removed are computed. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index 5ec68c78d74..45d53c0ef2c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -57,7 +57,7 @@ DatanodeDeletedBlockTransactions getTransactions( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if < 0. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 6b6a888f424..3eba240533e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -61,9 +61,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -85,9 +85,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. 
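A minimal sketch of the rack-based removal strategy described in the javadoc above: replicas are grouped per rack, a max-heap keyed by per-rack replica count picks the fullest rack, and one replica is removed from it until the excess is gone. The replica and rack types here are simplified stand-ins, not the real ContainerReplica/Node classes.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;

/** Sketch: pick replicas to delete so the most loaded rack sheds excess copies first. */
public final class OverReplicationSketch {

  /** Choose (total - expectedCount) replicas to remove, always from the fullest rack. */
  static Set<String> replicasToRemove(Map<String, String> replicaToRack, int expectedCount) {
    // Group replicas by the rack they live on.
    Map<String, List<String>> byRack = new HashMap<>();
    replicaToRack.forEach((replica, rack) ->
        byRack.computeIfAbsent(rack, r -> new ArrayList<>()).add(replica));

    // Max-heap of racks ordered by how many replicas they currently hold.
    PriorityQueue<String> fullestRack = new PriorityQueue<>(
        Comparator.comparingInt((String rack) -> byRack.get(rack).size()).reversed());
    fullestRack.addAll(byRack.keySet());

    Set<String> toRemove = new HashSet<>();
    int excess = replicaToRack.size() - expectedCount;
    while (excess-- > 0 && !fullestRack.isEmpty()) {
      String rack = fullestRack.poll();
      List<String> replicas = byRack.get(rack);
      toRemove.add(replicas.remove(replicas.size() - 1));
      if (!replicas.isEmpty()) {
        fullestRack.add(rack);  // re-insert so its reduced count is re-evaluated
      }
    }
    return toRemove;
  }

  public static void main(String[] args) {
    Map<String, String> replicaToRack = Map.of(
        "dn1", "rack1", "dn2", "rack1", "dn3", "rack1",
        "dn4", "rack2", "dn5", "rack3");
    // Keep 3 replicas; the two extra copies should come from the most loaded rack first.
    System.out.println(replicasToRemove(replicaToRack, 3));
  }
}
```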
* @param state container state @@ -164,6 +164,7 @@ void updateContainerReplica(ContainerID containerID, ContainerReplica replica) * * @param containerID Container ID * @param replica ContainerReplica + * @return True of dataNode is removed successfully else false. */ void removeContainerReplica(ContainerID containerID, ContainerReplica replica) throws ContainerNotFoundException, ContainerReplicaNotFoundException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 19a2f3c2e62..7fea44671ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -158,14 +158,6 @@ public ContainerBalancerTask(StorageContainerManager scm, this.selectedSources = new HashSet<>(); this.selectedTargets = new HashSet<>(); findSourceStrategy = new FindSourceGreedy(nodeManager); - if (config.getNetworkTopologyEnable()) { - findTargetStrategy = new FindTargetGreedyByNetworkTopology( - containerManager, placementPolicyValidateProxy, - nodeManager, networkTopology); - } else { - findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, - placementPolicyValidateProxy, nodeManager); - } this.iterationsStatistic = new ArrayList<>(); } @@ -440,7 +432,14 @@ private boolean initializeIteration() { this.maxDatanodesRatioToInvolvePerIteration = config.getMaxDatanodesRatioToInvolvePerIteration(); this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration(); - + if (config.getNetworkTopologyEnable()) { + findTargetStrategy = new FindTargetGreedyByNetworkTopology( + containerManager, placementPolicyValidateProxy, + nodeManager, networkTopology); + } else { + findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, + placementPolicyValidateProxy, nodeManager); + } this.excludeNodes = config.getExcludeNodes(); this.includeNodes = config.getIncludeNodes(); // include/exclude nodes from balancing according to configs diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 3d113b3d301..094e535dcbd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -42,7 +42,8 @@ public SCMNodeMetric(SCMNodeStat stat) { * @param capacity in bytes * @param used in bytes * @param remaining in bytes - * @param committed in bytes + * @param committed + * @paaram committed in bytes */ @VisibleForTesting public SCMNodeMetric(long capacity, long used, long remaining, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java index fcfef7de6e6..0abe8f6ea34 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java @@ -248,6 
+248,7 @@ public void setOfflineIndexesOkAfterPending(boolean val) { /** * Returns true if a container has under-replication caused by offline * indexes, but it is corrected by a pending add. + * @return */ public boolean offlineIndexesOkAfterPending() { return offlineIndexesOkAfterPending; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java index 4eef0a8a744..d1890bdf802 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java @@ -116,7 +116,7 @@ public List getPendingOps(ContainerID containerID) { * Store a ContainerReplicaOp to add a replica for the given ContainerID. * @param containerID ContainerID for which to add a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * added and reported by the datanode, or it will * be discarded. @@ -130,7 +130,7 @@ public void scheduleAddReplica(ContainerID containerID, * Store a ContainerReplicaOp to delete a replica for the given ContainerID. * @param containerID ContainerID for which to delete a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * deleted and reported by the datanode, or it will * be discarded. @@ -145,7 +145,7 @@ public void scheduleDeleteReplica(ContainerID containerID, * been replicated successfully. * @param containerID ContainerID for which to complete the replication * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeAddReplica(ContainerID containerID, @@ -167,7 +167,7 @@ public boolean completeAddReplica(ContainerID containerID, * been deleted successfully. * @param containerID ContainerID for which to complete the deletion * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. 
*/ public boolean completeDeleteReplica(ContainerID containerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index 4e14798ccdc..fe771fac6a4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -186,9 +186,9 @@ private void countReplicas() { * For example, consider a CLOSED container with the following replicas: * {CLOSED, CLOSING, OPEN, UNHEALTHY} * In this case, healthy replica count equals 3. Calculation: - * 1 CLOSED -> 1 matching replica. - * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. - * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. + * 1 CLOSED -> 1 matching replica. + * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. + * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. * Total healthy replicas = 3 = 1 matching + 2 mismatched replicas */ public int getHealthyReplicaCount() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java index f271b8a863c..a95c0d39945 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java @@ -49,6 +49,7 @@ public interface HealthCheck { * returns false. This allows handlers to be chained together, and each will * be tried in turn until one succeeds. * @param handler + * @return */ HealthCheck addNext(HealthCheck handler); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index 1289a0a21ff..c6f15be5d2c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -37,10 +37,10 @@ *
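A tiny sketch of the counting rule in the RatisContainerReplicaCount comment above: replicas whose state matches the container state count as matching, other non-UNHEALTHY states count as mismatched, and UNHEALTHY replicas are excluded from the healthy total. The enum is a simplified stand-in for the HDDS replica states.

```java
import java.util.List;

/** Sketch: healthy replicas = matching + mismatched, excluding UNHEALTHY. */
public final class HealthyReplicaCountSketch {

  enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  static int healthyReplicaCount(State containerState, List<State> replicaStates) {
    int matching = 0;
    int mismatched = 0;
    for (State s : replicaStates) {
      if (s == State.UNHEALTHY) {
        continue;                 // unhealthy replicas are never counted as healthy
      } else if (s == containerState) {
        matching++;
      } else {
        mismatched++;
      }
    }
    return matching + mismatched;
  }

  public static void main(String[] args) {
    // The example from the comment above: a CLOSED container with
    // {CLOSED, CLOSING, OPEN, UNHEALTHY} replicas -> 3 healthy replicas.
    System.out.println(healthyReplicaCount(State.CLOSED,
        List.of(State.CLOSED, State.CLOSING, State.OPEN, State.UNHEALTHY)));
  }
}
```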

    * Currently we manage the following attributes for a container. *

    - * 1. StateMap - LifeCycleState -> Set of ContainerIDs - * 2. TypeMap - ReplicationType -> Set of ContainerIDs - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs + * 1. StateMap - LifeCycleState -> Set of ContainerIDs + * 2. TypeMap - ReplicationType -> Set of ContainerIDs + * 3. OwnerMap - OwnerNames -> Set of ContainerIDs + * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs *

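A compact sketch of the attribute-to-container-set bookkeeping listed above (state, type, owner and factor each map to a set of ContainerIDs). The types are simplified stand-ins; only the map-of-sets shape described in the ContainerAttribute javadoc is assumed.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Sketch: one attribute value (e.g. a lifecycle state) maps to the containers holding it. */
public final class ContainerAttributeSketch<T> {

  private final Map<T, Set<Long>> attributeMap = new HashMap<>();

  /** Track containerId under the given attribute value. */
  public void insert(T value, long containerId) {
    attributeMap.computeIfAbsent(value, v -> new HashSet<>()).add(containerId);
  }

  /** Move containerId from one attribute value to another, e.g. OPEN -> CLOSED. */
  public void update(T from, T to, long containerId) {
    attributeMap.getOrDefault(from, new HashSet<>()).remove(containerId);
    insert(to, containerId);
  }

  public Set<Long> getCollection(T value) {
    return attributeMap.getOrDefault(value, new HashSet<>());
  }

  public static void main(String[] args) {
    ContainerAttributeSketch<String> stateMap = new ContainerAttributeSketch<>();
    stateMap.insert("OPEN", 1L);
    stateMap.insert("OPEN", 2L);
    stateMap.update("OPEN", "CLOSED", 2L);
    System.out.println("OPEN: " + stateMap.getCollection("OPEN")
        + ", CLOSED: " + stateMap.getCollection("CLOSED"));
  }
}
```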
    * This means that for a cluster size of 750 PB -- we will have around 150 * Million containers, if we assume 5GB average container size. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java index 5eeb489f677..f0d78b23079 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java @@ -58,7 +58,7 @@ /** * Utilities for SCM HA security. */ -public final class HASecurityUtils { +public final class HASecurityUtils { private HASecurityUtils() { } @@ -150,6 +150,7 @@ public static CertificateServer initializeRootCertificateServer( * * @param conf * @param certificateClient + * @return */ public static GrpcTlsConfig createSCMRatisTLSConfig(SecurityConfig conf, CertificateClient certificateClient) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 92a5140ff2a..03f6ae293b2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -55,6 +55,7 @@ public interface SCMHAManager extends AutoCloseable { /** * Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its * valid. + * @return */ SCMHADBTransactionBuffer asSCMHADBTransactionBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index b3350d8a12a..05ed833edbe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -346,7 +346,7 @@ public void setCommandCounts(CommandQueueReportProto cmds, * Retrieve the number of queued commands of the given type, as reported by * the datanode at the last heartbeat. * @param cmd The command for which to receive the queued command count - * @return -1 if we have no information about the count, or an integer >= 0 + * @return -1 if we have no information about the count, or an integer >= 0 * indicating the command count at the last heartbeat. */ public int getCommandCount(SCMCommandProto.Type cmd) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 1cafab3f67c..4f7df496906 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -32,7 +32,6 @@ public class DatanodeUsageInfo { private DatanodeDetails datanodeDetails; private SCMNodeStat scmNodeStat; private int containerCount; - private int pipelineCount; /** * Constructs a DatanodeUsageInfo with DatanodeDetails and SCMNodeStat. 
@@ -46,7 +45,6 @@ public DatanodeUsageInfo( this.datanodeDetails = datanodeDetails; this.scmNodeStat = scmNodeStat; this.containerCount = -1; - this.pipelineCount = -1; } /** @@ -147,14 +145,6 @@ public void setContainerCount(int containerCount) { this.containerCount = containerCount; } - public int getPipelineCount() { - return pipelineCount; - } - - public void setPipelineCount(int pipelineCount) { - this.pipelineCount = pipelineCount; - } - /** * Gets Comparator that compares two DatanodeUsageInfo on the basis of * their utilization values. Utilization is (capacity - remaining) divided @@ -220,7 +210,6 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { } builder.setContainerCount(containerCount); - builder.setPipelineCount(pipelineCount); return builder; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 992dc82582b..25be60945a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -97,6 +97,8 @@ default RegisteredCommand register( * @param type The type of the SCMCommand. * @param scmCommand A BiConsumer that takes a DatanodeDetails and a * SCMCommand object and performs the necessary actions. + * @return whatever the regular register command returns with default + * layout version passed in. */ default void registerSendCommandNotify(SCMCommandProto.Type type, BiConsumer> scmCommand) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java index 1bd9677a363..a66fc0d22fb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java @@ -45,7 +45,7 @@ public interface NodeManagerMXBean { /** * @return Get the NodeStatus table information like hostname, - * Commissioned State and Operational State column for dataNode + * Commissioned State & Operational State column for dataNode */ Map> getNodeStatusInfo(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 78c1801a103..3c3ff8fb833 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -791,7 +791,7 @@ public void run() { * * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls this method, and the - * node health processing thread that calls {@link #checkNodesHealth}. + * node health processing thread that calls {@link this#checkNodesHealth}. */ public synchronized void forceNodesToHealthyReadOnly() { try { @@ -817,7 +817,7 @@ public synchronized void forceNodesToHealthyReadOnly() { /** * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls - * {@link #forceNodesToHealthyReadOnly}, and the node health processing + * {@link this#forceNodesToHealthyReadOnly}, and the node health processing * thread that calls this method. 
*/ @VisibleForTesting diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 7db0c88e173..05a68628852 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -146,8 +146,6 @@ public class SCMNodeManager implements NodeManager { private static final String LASTHEARTBEAT = "LASTHEARTBEAT"; private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT"; private static final String TOTALCAPACITY = "CAPACITY"; - private static final String DNUUID = "UUID"; - private static final String VERSION = "VERSION"; /** * Constructs SCM machine Manager. */ @@ -449,11 +447,6 @@ public RegisteredCommand register( processNodeReport(datanodeDetails, nodeReport); LOG.info("Updated datanode to: {}", dn); scmNodeEventPublisher.fireEvent(SCMEvents.NODE_ADDRESS_UPDATE, dn); - } else if (isVersionChange(oldNode.getVersion(), datanodeDetails.getVersion())) { - LOG.info("Update the version for registered datanode = {}, " + - "oldVersion = {}, newVersion = {}.", - datanodeDetails.getUuid(), oldNode.getVersion(), datanodeDetails.getVersion()); - nodeStateManager.updateNode(datanodeDetails, layoutInfo); } } catch (NodeNotFoundException e) { LOG.error("Cannot find datanode {} from nodeStateManager", @@ -515,18 +508,6 @@ private boolean updateDnsToUuidMap( return ipChanged || hostNameChanged; } - /** - * Check if the version has been updated. - * - * @param oldVersion datanode oldVersion - * @param newVersion datanode newVersion - * @return true means replacement is needed, while false means replacement is not needed. - */ - private boolean isVersionChange(String oldVersion, String newVersion) { - final boolean versionChanged = !Objects.equals(oldVersion, newVersion); - return versionChanged; - } - /** * Send heartbeat to indicate the datanode is alive and doing well. * @@ -1001,7 +982,6 @@ public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) { DatanodeUsageInfo usageInfo = new DatanodeUsageInfo(dn, stat); try { usageInfo.setContainerCount(getContainerCount(dn)); - usageInfo.setPipelineCount(getPipeLineCount(dn)); } catch (NodeNotFoundException ex) { LOG.error("Unknown datanode {}.", dn, ex); } @@ -1155,8 +1135,6 @@ public Map> getNodeStatusInfo() { String nonScmUsedPerc = storagePercentage[1]; map.put(USEDSPACEPERCENT, "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%"); - map.put(DNUUID, dni.getUuidString()); - map.put(VERSION, dni.getVersion()); nodes.put(hostName, map); } return nodes; @@ -1166,6 +1144,7 @@ public Map> getNodeStatusInfo() { * Calculate the storage capacity of the DataNode node. * @param storageReports Calculate the storage capacity corresponding * to the storage collection. + * @return */ public static String calculateStorageCapacity( List storageReports) { @@ -1213,6 +1192,7 @@ private static String convertUnit(double value) { * Calculate the storage usage percentage of a DataNode node. * @param storageReports Calculate the storage percentage corresponding * to the storage collection. 
+ * @return */ public static String[] calculateStoragePercentage( List storageReports) { @@ -1630,11 +1610,6 @@ public int getContainerCount(DatanodeDetails datanodeDetails) return nodeStateManager.getContainerCount(datanodeDetails.getUuid()); } - public int getPipeLineCount(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateManager.getPipelinesCount(datanodeDetails); - } - @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { writeLock().lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index d6058877126..4dd0443a505 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -46,6 +46,7 @@ void addPipeline(HddsProtos.Pipeline pipelineProto) /** * Removing pipeline would be replicated to Ratis. * @param pipelineIDProto + * @return Pipeline removed * @throws IOException */ @Replicate diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java index fc7249462c4..d38a904d09c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java @@ -118,8 +118,6 @@ public class RootCARotationManager extends StatefulService { * * @param scm the storage container manager * - *

    -   * {@code
        *                         (1)   (3)(4)
        *                   --------------------------->
        *                         (2)                        scm2(Follower)
    @@ -132,8 +130,8 @@ public class RootCARotationManager extends StatefulService {
        *                   --------------------------->
        *                          (2)                       scm3(Follower)
        *                   <---------------------------
    -   * }
    -   * 
    + * + * * (1) Rotation Prepare * (2) Rotation Prepare Ack * (3) Rotation Commit @@ -188,7 +186,7 @@ public void notifyStatusChanged() { waitAckTask.cancel(true); } if (waitAckTimeoutTask != null) { - waitAckTimeoutTask.cancel(true); + waitAckTask.cancel(true); } if (clearPostProcessingTask != null) { clearPostProcessingTask.cancel(true); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index 6f5429a853b..cca2df00374 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -289,12 +289,12 @@ public interface ContainerReport { public enum ContainerReportType { /** * Incremental container report type - * {@link IncrementalContainerReportFromDatanode}. + * {@liks IncrementalContainerReportFromDatanode}. */ ICR, /** * Full container report type - * {@link ContainerReportFromDatanode}. + * {@liks ContainerReportFromDatanode}. */ FCR } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 7c6f0fbbddf..e74a83e394f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -491,6 +491,7 @@ private static String flatten(String input) { /** * Get Key associated with Datanode address for this server. + * @return */ protected String getDatanodeAddressKey() { return this.scm.getScmNodeDetails().getDatanodeAddressKey(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java index 5aaf4b7b485..2b6fa032b53 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java @@ -70,10 +70,9 @@ public String getKerberosKeytab() { * This static class is required to support other classes * that reference the key names and also require attributes. * Example: SCMSecurityProtocol where the KerberosInfo references - * the old configuration with the annotation shown below: - *
    - * {@code KerberosInfo(serverPrincipal = - * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} + * the old configuration with the annotation shown below:- + * @KerberosInfo(serverPrincipal = + * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) */ public static class ConfigStrings { public static final String HDDS_SCM_HTTP_AUTH_CONFIG_PREFIX = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index 75a5193116c..de609356b22 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.server; -import java.util.List; import java.util.Map; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -73,7 +72,7 @@ public interface SCMMXBean extends ServiceRuntimeInfo { String getClusterId(); - List> getScmRatisRoles(); + String getScmRatisRoles(); /** * Primordial node is the node on which scm init operation is performed. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java index 17318107e3d..88b3c887746 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java @@ -430,6 +430,7 @@ public String getCACertificate() throws IOException { * @param role - node role: OM/SCM/DN. * @param startSerialId - start certificate serial id. * @param count - max number of certificates returned in a batch. 
+ * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 8f7a7c2f9f1..876c499113d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -172,7 +172,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ReflectionUtils; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.server.RaftServer; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.JvmPauseMonitor; import org.slf4j.Logger; @@ -614,8 +613,7 @@ public OzoneConfiguration getConfiguration() { * @param conf HDDS configuration * @param configurator SCM configurator * @return SCM instance - * @throws IOException on Failure, - * @throws AuthenticationException + * @throws IOException, AuthenticationException */ public static StorageContainerManager createSCM( OzoneConfiguration conf, SCMConfigurator configurator) @@ -628,8 +626,7 @@ public static StorageContainerManager createSCM( * * @param conf HDDS configuration * @return SCM instance - * @throws IOException on Failure, - * @throws AuthenticationException + * @throws IOException, AuthenticationException */ public static StorageContainerManager createSCM(OzoneConfiguration conf) throws IOException, AuthenticationException { @@ -1621,7 +1618,8 @@ private void persistSCMCertificates() throws IOException { if (primaryScmNodeId != null && !primaryScmNodeId.equals( scmStorageConfig.getScmId())) { List pemEncodedCerts = - getScmSecurityClientWithMaxRetry(configuration, getCurrentUser()).listCACertificate(); + scmCertificateClient.listCA(); + // Write the primary SCM CA and Root CA during startup. for (String cert : pemEncodedCerts) { X509Certificate x509Certificate = CertificateCodec.getX509Certificate( @@ -2139,54 +2137,10 @@ public ContainerTokenGenerator getContainerTokenGenerator() { } @Override - public List> getScmRatisRoles() { + public String getScmRatisRoles() { final SCMRatisServer server = getScmHAManager().getRatisServer(); - - // If Ratis is disabled - if (server == null) { - return getRatisRolesException("Ratis is disabled"); - } - - // To attempt to find the SCM Leader, - // and if the Leader is not found - // return Leader is not found message. - RaftServer.Division division = server.getDivision(); - RaftPeerId leaderId = division.getInfo().getLeaderId(); - if (leaderId == null) { - return getRatisRolesException("No leader found"); - } - - // If the SCMRatisServer is stopped, return a service stopped message. - if (server.isStopped()) { - return getRatisRolesException("Server is shutting down"); - } - - // Attempt to retrieve role information. 
- try { - List ratisRoles = server.getRatisRoles(); - List> result = new ArrayList<>(); - for (String role : ratisRoles) { - String[] roleArr = role.split(":"); - List scmInfo = new ArrayList<>(); - // Host Name - scmInfo.add(roleArr[0]); - // Node ID - scmInfo.add(roleArr[3]); - // Ratis Port - scmInfo.add(roleArr[1]); - // Role - scmInfo.add(roleArr[2]); - result.add(scmInfo); - } - return result; - } catch (Exception e) { - LOG.error("Failed to getRatisRoles.", e); - return getRatisRolesException("Exception Occurred, " + e.getMessage()); - } - } - - private static List> getRatisRolesException(String exceptionString) { - return Collections.singletonList(Collections.singletonList(exceptionString)); + return server != null ? + HddsUtils.format(server.getRatisRoles()) : "STANDALONE"; } /** diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 2748716e67f..3f825d4e25f 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -110,114 +110,6 @@

    Space Statistics

-            <h2>Pipeline Statistics</h2>
-            <table>
-              <tr><th>Pipeline State</th><th>Size</th></tr>
-              <tr><td>Closed</td><td>{{statistics.pipelines.closed}}</td></tr>
-              <tr><td>Allocated</td><td>{{statistics.pipelines.allocated}}</td></tr>
-              <tr><td>Open</td><td>{{statistics.pipelines.open}}</td></tr>
-              <tr><td>Dormant</td><td>{{statistics.pipelines.dormant}}</td></tr>
-            </table>
-
-            <h2>Container Statistics</h2>
-            <table>
-              <tr><th>Operational State</th><th>Size</th></tr>
-              <tr><td>Open</td><td>{{statistics.containers.lifecycle.open}}</td></tr>
-              <tr><td>Closing</td><td>{{statistics.containers.lifecycle.closing}}</td></tr>
-              <tr><td>Quasi Closed</td><td>{{statistics.containers.lifecycle.quasi_closed}}</td></tr>
-              <tr><td>Closed</td><td>{{statistics.containers.lifecycle.closed}}</td></tr>
-              <tr><td>Deleting</td><td>{{statistics.containers.lifecycle.deleting}}</td></tr>
-              <tr><td>Deleted</td><td>{{statistics.containers.lifecycle.deleted}}</td></tr>
-              <tr><td>Recovering</td><td>{{statistics.containers.lifecycle.recovering}}</td></tr>
-            </table>
-            <table>
-              <tr><th>Health</th><th>Size</th></tr>
-              <tr><td>Under Replicated</td><td>{{statistics.containers.health.under_replicated}}</td></tr>
-              <tr><td>Mis Replicated</td><td>{{statistics.containers.health.mis_replicated}}</td></tr>
-              <tr><td>Over Replicated</td><td>{{statistics.containers.health.over_replicated}}</td></tr>
-              <tr><td>Missing</td><td>{{statistics.containers.health.missing}}</td></tr>
-              <tr><td>Unhealthy</td><td>{{statistics.containers.health.unhealthy}}</td></tr>
-              <tr><td>Empty</td><td>{{statistics.containers.health.empty}}</td></tr>
-              <tr><td>Open Unhealthy</td><td>{{statistics.containers.health.open_unhealthy}}</td></tr>
-              <tr><td>Quasi Closed Stuck</td><td>{{statistics.containers.health.quasi_closed_stuck}}</td></tr>
-              <tr><td>Open Without Pipeline</td><td>{{statistics.containers.health.open_without_pipeline}}</td></tr>
-            </table>

    Node Status

    @@ -248,10 +140,6 @@

    Node Status

    'sortdesc':(columnName == 'comstate' && !reverse)}">Commisioned State Last Heartbeat - UUID - Version @@ -269,8 +157,6 @@

    Node Status

    {{typestat.opstate}} {{typestat.comstate}} {{typestat.lastheartbeat}} - {{typestat.uuid}} - {{typestat.version}} @@ -324,6 +210,10 @@

    Status

    Force Exit Safe Mode {{$ctrl.overview.jmx.SafeModeExitForceful}} + + SCM Roles (HA) + {{$ctrl.overview.jmx.ScmRatisRoles}} + Primordial Node (HA) {{$ctrl.overview.jmx.PrimordialNode}} @@ -345,35 +235,6 @@

    Meta-Data Volume Information

-            <h2>SCM Roles (HA)</h2>
-            <div>{{$ctrl.overview.jmx.ScmRatisRoles[0][0]}}</div>
-            <table>
-              <tr><th>Host Name</th><th>Node ID</th><th>Ratis Port</th><th>Role</th></tr>
-              <tr><td>{{roles[0]}}</td><td>{{roles[1]}}</td><td>{{roles[2]}}</td><td>{{roles[3]}}</td></tr>
-              <tr><td>{{roles[0]}}</td><td>{{roles[1]}}</td><td>{{roles[2]}}</td><td>{{roles[3]}}</td></tr>
-            </table>

    Safemode rules statuses
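The scm.js hunk below removes the Angular code that polls SCM's /jmx servlet for pipeline and ReplicationManager metrics. For reference, the same bean can be read from any JVM language; a small Java probe is sketched here under assumed values (SCM host name, HTTP port 9876, and attribute names copied from the removed script), not as part of this change.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.net.URL;

public final class ScmJmxProbe {
  private ScmJmxProbe() { }

  public static void main(String[] args) throws Exception {
    // Hypothetical SCM web address; 9876 is the usual SCM HTTP port, adjust as needed.
    URL url = new URL("http://scm-host:9876/jmx"
        + "?qry=Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics");
    try (InputStream in = url.openStream()) {
      JsonNode bean = new ObjectMapper().readTree(in).path("beans").path(0);
      // Attribute names match those the removed scm.js copied into $scope.statistics.
      System.out.println("OpenContainers    = " + bean.path("OpenContainers").asLong());
      System.out.println("ClosedContainers  = " + bean.path("ClosedContainers").asLong());
      System.out.println("MissingContainers = " + bean.path("MissingContainers").asLong());
    }
  }
}
```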

    diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index fc216c06862..6fac6849530 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -53,42 +53,9 @@ remaining : "N/A", nonscmused : "N/A" } - }, - pipelines : { - closed : "N/A", - allocated : "N/A", - open : "N/A", - dormant : "N/A" - }, - containers : { - lifecycle : { - open : "N/A", - closing : "N/A", - quasi_closed : "N/A", - closed : "N/A", - deleting : "N/A", - deleted : "N/A", - recovering : "N/A" - }, - health : { - under_replicated : "N/A", - mis_replicated : "N/A", - over_replicated : "N/A", - missing : "N/A", - unhealthy : "N/A", - empty : "N/A", - open_unhealthy : "N/A", - quasi_closed_stuck : "N/A", - open_without_pipeline : "N/A" - } } } - $http.get("jmx?qry=Ratis:service=RaftServer,group=*,id=*") - .then(function (result) { - ctrl.role = result.data.beans[0]; - }); - function get_protocol(URLScheme, value, baseProto, fallbackProto) { let protocol = "unknown" let port = -1; @@ -128,8 +95,6 @@ capacity: value && value.find((element) => element.key === "CAPACITY").value, comstate: value && value.find((element) => element.key === "COMSTATE").value, lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value, - uuid: value && value.find((element) => element.key === "UUID").value, - version: value && value.find((element) => element.key === "VERSION").value, port: portSpec.port, protocol: portSpec.proto } @@ -170,46 +135,6 @@ } }); }); - - $http.get("jmx?qry=Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo") - .then(function (result) { - const URLScheme = location.protocol.replace(":" , ""); - ctrl.scmpipelinemanager = result.data.beans[0]; - ctrl.scmpipelinemanager.PipelineInfo.forEach(({key, value}) => { - if(key == "CLOSED") { - $scope.statistics.pipelines.closed = value; - } else if(key == "ALLOCATED") { - $scope.statistics.pipelines.allocated = value; - } else if(key == "OPEN") { - $scope.statistics.pipelines.open = value; - } else if(key == "DORMANT") { - $scope.statistics.pipelines.dormant = value; - } - }); - }); - - $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics") - .then(function (result) { - const URLScheme = location.protocol.replace(":" , ""); - ctrl.scmcontainermanager = result.data.beans[0]; - $scope.statistics.containers.lifecycle.open = ctrl.scmcontainermanager.OpenContainers; - $scope.statistics.containers.lifecycle.closing = ctrl.scmcontainermanager.ClosingContainers; - $scope.statistics.containers.lifecycle.quasi_closed = ctrl.scmcontainermanager.QuasiClosedContainers; - $scope.statistics.containers.lifecycle.closed = ctrl.scmcontainermanager.ClosedContainers; - $scope.statistics.containers.lifecycle.deleting = ctrl.scmcontainermanager.DeletingContainers; - $scope.statistics.containers.lifecycle.deleted = ctrl.scmcontainermanager.DeletedContainers; - $scope.statistics.containers.lifecycle.recovering = ctrl.scmcontainermanager.RecoveringContainers; - $scope.statistics.containers.health.under_replicated = ctrl.scmcontainermanager.UnderReplicatedContainers; - $scope.statistics.containers.health.mis_replicated = ctrl.scmcontainermanager.MisReplicatedContainers; - $scope.statistics.containers.health.over_replicated = ctrl.scmcontainermanager.OverReplicatedContainers; - $scope.statistics.containers.health.missing = ctrl.scmcontainermanager.MissingContainers; 
- $scope.statistics.containers.health.unhealthy = ctrl.scmcontainermanager.UnhealthyContainers; - $scope.statistics.containers.health.empty = ctrl.scmcontainermanager.EmptyContainers; - $scope.statistics.containers.health.open_unhealthy = ctrl.scmcontainermanager.OpenUnhealthyContainers; - $scope.statistics.containers.health.quasi_closed_stuck = ctrl.scmcontainermanager.StuckQuasiClosedContainers; - $scope.statistics.containers.health.open_without_pipeline = ctrl.scmcontainermanager.OpenContainersWithoutPipeline; - }); - /*if option is 'All' display all records else display specified record on page*/ $scope.UpdateRecordsToShow = () => { if($scope.RecordsToDisplay == 'All') { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java index 0972e57df64..a3ec55d5863 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java @@ -86,7 +86,7 @@ public MockedSCM(@Nonnull TestableCluster testableCluster) { } } - void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) { + private void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) { ozoneCfg.setFromObject(balancerConfig); try { doMock(balancerConfig, ozoneCfg); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java index 48b3ee2d0de..b8ac648e844 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java @@ -19,9 +19,7 @@ package org.apache.hadoop.hdds.scm.container.balancer; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.List; @@ -57,30 +55,4 @@ void testGetIterationStatistics() { }); } - - /** - * @see HDDS-11350 - */ - @Test - void testGetCurrentIterationsStatisticDoesNotThrowNullPointerExceptionWhenBalancingThreadIsSleeping() { - MockedSCM mockedScm = new MockedSCM(new TestableCluster(10, OzoneConsts.GB)); - OzoneConfiguration ozoneConfig = new OzoneConfiguration(); - ContainerBalancerConfiguration config = ozoneConfig.getObject(ContainerBalancerConfiguration.class); - - config.setIterations(2); - // the following config makes the balancing thread go to sleep while waiting for DU to be triggered in DNs and - // updated storage reports to arrive via DN heartbeats - of course, this is a unit test and NodeManager, DNs etc. 
- // are all mocked - config.setTriggerDuEnable(true); - mockedScm.init(config, ozoneConfig); - - // run ContainerBalancerTask in a new thread and have the current thread call getCurrentIterationsStatistic - StorageContainerManager scm = mockedScm.getStorageContainerManager(); - ContainerBalancer cb = new ContainerBalancer(scm); - ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, cb, cb.getMetrics(), config, false); - Thread thread = new Thread(task); - thread.setDaemon(true); - thread.start(); - Assertions.assertDoesNotThrow(task::getCurrentIterationsStatistic); - } } diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 9a3a5c7a8f1..c9fa668445d 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -335,11 +335,11 @@ private static long monotonicNow() { * *

    * TODO: Add lambda support once Java 8 is common. - * {@code + *

        *   SystemErrCapturer.withCapture(capture -> {
        *     ...
        *   })
    -   * }
    +   * 
    */ public static class SystemErrCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -376,11 +376,11 @@ public void close() throws Exception { * *

    * TODO: Add lambda support once Java 8 is common. - * {@code + *

        *   SystemOutCapturer.withCapture(capture -> {
        *     ...
        *   })
    -   * }
    +   * 
    */ public static class SystemOutCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -475,8 +475,8 @@ public static final class ReflectionUtils { * This method provides the modifiers field using reflection approach which is compatible * for both pre Java 9 and post java 9 versions. * @return modifiers field - * @throws IllegalAccessException illegalAccessException, - * @throws NoSuchFieldException noSuchFieldException. + * @throws IllegalAccessException + * @throws NoSuchFieldException */ public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException { Field modifiersField = null; diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java index d6b028c815f..661989dade1 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java @@ -77,13 +77,11 @@ public interface TimeoutHandler { * is called. This returns the exception passed in (if any), * or generates a new one. *
    -   * {@code
        * await(
        *   30 * 1000,
        *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); },
        *   () -> 500),
        *   (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
    -   * }
        * 
    * * @param timeoutMillis timeout in milliseconds. @@ -162,11 +160,9 @@ public static int await(int timeoutMillis, *

    * Example: await for probe to succeed: *

    -   * {@code
        * await(
        *   30 * 1000, 500,
        *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); });
    -   * }
        * 
    * * @param timeoutMillis timeout in milliseconds. diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index c3e379a5399..98d8bb0d83e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -116,7 +116,7 @@ private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) throws IOException { XceiverClientManager manager; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(conf); + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, conf); manager = new XceiverClientManager(conf, conf.getObject(XceiverClientManager.ScmClientConfig.class), new ClientTrustManager(caCerts, null)); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index 2c069291a86..b967fa0658c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,8 +155,6 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); - System.out.printf("%-13s: %d %n", "Pipeline(s)", - info.getPipelineCount()); System.out.printf("%-13s: %d %n", "Container(s)", info.getContainerCount()); System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", @@ -194,7 +192,6 @@ private static class DatanodeUsage { private long committed = 0; private long freeSpaceToSpare = 0; private long containerCount = 0; - private long pipelineCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { if (proto.hasNode()) { @@ -215,9 +212,6 @@ private static class DatanodeUsage { if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } - if (proto.hasPipelineCount()) { - pipelineCount = proto.getPipelineCount(); - } if (proto.hasFreeSpaceToSpare()) { freeSpaceToSpare = proto.getFreeSpaceToSpare(); } @@ -283,8 +277,5 @@ public double getRemainingRatio() { return remaining / (double) capacity; } - public long getPipelineCount() { - return pipelineCount; - } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index e5392ef618d..7c70456995b 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -59,22 +59,12 @@ public void execute(ScmClient scmClient) throws IOException { List pipelineList = new ArrayList<>(); Predicate predicate = replicationFilter.orElse(null); - List pipelines = scmClient.listPipelines(); - if (predicate == null) { - for (Pipeline pipeline : pipelines) { - if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED) { - pipelineList.add(pipeline); - } - } - } else { - for (Pipeline pipeline : pipelines) { - boolean filterPassed = 
predicate.test(pipeline); - if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { - pipelineList.add(pipeline); - } + for (Pipeline pipeline : scmClient.listPipelines()) { + boolean filterPassed = (predicate != null) && predicate.test(pipeline); + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { + pipelineList.add(pipeline); } } - System.out.println("Sending close command for " + pipelineList.size() + " pipelines..."); pipelineList.forEach(pipeline -> { try { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index a691e754606..09f6621735e 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -94,7 +94,6 @@ public void testCorrectJsonValuesInReport() throws IOException { assertEquals(80.00, json.get(0).get("remainingPercent").doubleValue(), 0.001); assertEquals(5, json.get(0).get("containerCount").longValue()); - assertEquals(10, json.get(0).get("pipelineCount").longValue()); } @Test @@ -123,7 +122,6 @@ public void testOutputDataFieldsAligning() throws IOException { assertThat(output).contains("Remaining :"); assertThat(output).contains("Remaining % :"); assertThat(output).contains("Container(s) :"); - assertThat(output).contains("Pipeline(s) :"); assertThat(output).contains("Container Pre-allocated :"); assertThat(output).contains("Remaining Allocatable :"); assertThat(output).contains("Free Space To Spare :"); @@ -137,7 +135,6 @@ private List getUsageProto() { .setRemaining(80) .setUsed(10) .setContainerCount(5) - .setPipelineCount(10) .build()); return result; } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java deleted file mode 100644 index 013350fe871..00000000000 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import picocli.CommandLine; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.params.provider.Arguments.arguments; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Tests for the ClosePipelineSubcommand class. - */ -class TestClosePipelinesSubCommand { - - private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); - private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); - private final PrintStream originalOut = System.out; - private final PrintStream originalErr = System.err; - private ClosePipelineSubcommand cmd; - private ScmClient scmClient; - - public static Stream values() { - return Stream.of( - arguments( - new String[]{"--all"}, - "Sending close command for 2 pipelines...\n", - "with empty parameters" - ), - arguments( - new String[]{"--all", "-ffc", "THREE"}, - "Sending close command for 1 pipelines...\n", - "by filter factor, opened" - ), - arguments( - new String[]{"--all", "-ffc", "ONE"}, - "Sending close command for 0 pipelines...\n", - "by filter factor, closed" - ), - arguments( - new String[]{"--all", "-r", "rs-3-2-1024k", "-t", "EC"}, - "Sending close command for 1 pipelines...\n", - "by replication and type, opened" - ), - arguments( - new String[]{"--all", "-r", "rs-6-3-1024k", "-t", "EC"}, - "Sending close command for 0 pipelines...\n", - "by replication and type, closed" - ), - arguments( - new String[]{"--all", "-t", "EC"}, - "Sending close command for 1 pipelines...\n", - "by type, opened" - ), - arguments( - new String[]{"--all", "-t", "RS"}, - "Sending close command for 0 pipelines...\n", - "by type, closed" - ) - ); - } - - @BeforeEach - public void setup() throws IOException { - cmd = new ClosePipelineSubcommand(); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); - - scmClient = mock(ScmClient.class); - when(scmClient.listPipelines()).thenAnswer(invocation -> createPipelines()); - } - - @AfterEach - public void tearDown() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - @ParameterizedTest(name = "{index}. 
{2}") - @MethodSource("values") - void testCloseAllPipelines(String[] commands, String expectedOutput, String testName) throws IOException { - CommandLine c = new CommandLine(cmd); - c.parseArgs(commands); - cmd.execute(scmClient); - assertEquals(expectedOutput, outContent.toString(DEFAULT_ENCODING)); - } - - private List createPipelines() { - List pipelines = new ArrayList<>(); - pipelines.add(createPipeline(StandaloneReplicationConfig.getInstance(ONE), - Pipeline.PipelineState.CLOSED)); - pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), - Pipeline.PipelineState.OPEN)); - pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), - Pipeline.PipelineState.CLOSED)); - - pipelines.add(createPipeline( - new ECReplicationConfig(3, 2), Pipeline.PipelineState.OPEN)); - pipelines.add(createPipeline( - new ECReplicationConfig(3, 2), Pipeline.PipelineState.CLOSED)); - pipelines.add(createPipeline( - new ECReplicationConfig(6, 3), Pipeline.PipelineState.CLOSED)); - pipelines.add(createPipeline( - RatisReplicationConfig.getInstance(THREE), Pipeline.PipelineState.CLOSED)); - return pipelines; - } - - private Pipeline createPipeline(ReplicationConfig repConfig, - Pipeline.PipelineState state) { - return new Pipeline.Builder() - .setId(PipelineID.randomId()) - .setCreateTimestamp(System.currentTimeMillis()) - .setState(state) - .setReplicationConfig(repConfig) - .setNodes(createDatanodeDetails(1)) - .build(); - } - - private List createDatanodeDetails(int count) { - List dns = new ArrayList<>(); - for (int i = 0; i < count; i++) { - HddsProtos.DatanodeDetailsProto dnd = - HddsProtos.DatanodeDetailsProto.newBuilder() - .setHostName("host" + i) - .setIpAddress("1.2.3." + i + 1) - .setNetworkLocation("/default") - .setNetworkName("host" + i) - .addPorts(HddsProtos.Port.newBuilder() - .setName("ratis").setValue(5678).build()) - .setUuid(UUID.randomUUID().toString()) - .build(); - dns.add(DatanodeDetails.getFromProtoBuf(dnd)); - } - return dns; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 56ca8798f22..65dce09cba1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -217,7 +217,7 @@ public S3SecretValue getS3Secret(String kerberosID, boolean createIfNotExist) * Set secretKey for accessId. * @param accessId * @param secretKey - * @return {@code S3SecretValue } pair + * @return S3SecretValue pair * @throws IOException */ public S3SecretValue setS3Secret(String accessId, String secretKey) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 80a495a1d12..44239aafceb 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -170,7 +170,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol, * Create OzoneClient for token renew/cancel operations. 
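Stepping back to the ClosePipelineSubcommand change and the deleted TestClosePipelinesSubCommand above: the test's expected outputs imply one selection rule, close every non-CLOSED pipeline when no replication filter is given, otherwise only the non-CLOSED pipelines the filter accepts. A stream-based sketch of that rule follows; it is an illustration, not the subcommand's actual code.

```java
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

final class PipelineCloseFilter {
  private PipelineCloseFilter() { }

  // Keep every non-CLOSED pipeline; apply the replication filter only when one is given.
  static List<Pipeline> selectPipelinesToClose(List<Pipeline> pipelines,
      Predicate<Pipeline> replicationFilter) {
    return pipelines.stream()
        .filter(p -> p.getPipelineState() != Pipeline.PipelineState.CLOSED)
        .filter(p -> replicationFilter == null || replicationFilter.test(p))
        .collect(Collectors.toList());
  }
}
```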
* @param conf Configuration to be used for OzoneCient creation * @param token ozone token is involved - * @return OzoneClient + * @return * @throws IOException */ public static OzoneClient getOzoneClient(Configuration conf, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java index dc85fffe1ca..36031a9cf4d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java @@ -32,7 +32,7 @@ public class KeyOutputStreamSemaphore { private final Semaphore requestSemaphore; KeyOutputStreamSemaphore(int maxConcurrentWritePerKey) { - LOG.debug("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey); + LOG.info("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey); if (maxConcurrentWritePerKey > 0) { requestSemaphore = new Semaphore(maxConcurrentWritePerKey); } else if (maxConcurrentWritePerKey == 0) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 8d9614b554a..16211ebbb8e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -59,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -513,6 +514,39 @@ List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, int maxListResult) throws IOException; + /** + * List trash allows the user to list the keys that were marked as deleted, + * but not actually deleted by Ozone Manager. This allows a user to recover + * keys within a configurable window. + * @param volumeName - The volume name, which can also be a wild card + * using '*'. + * @param bucketName - The bucket name, which can also be a wild card + * using '*'. + * @param startKeyName - List keys from a specific key name. + * @param keyPrefix - List keys using a specific prefix. + * @param maxKeys - The number of keys to be returned. This must be below + * the cluster level set by admins. + * @return The list of keys that are deleted from the deleted table. + * @throws IOException + */ + List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, + int maxKeys) + throws IOException; + + /** + * Recover trash allows the user to recover keys that were marked as deleted, + * but not actually deleted by Ozone Manager. + * @param volumeName - The volume name. + * @param bucketName - The bucket name. + * @param keyName - The key user want to recover. + * @param destinationBucket - The bucket user want to recover to. + * @return The result of recovering operation is success or not. 
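For the listTrash/recoverTrash methods being restored to ClientProtocol in the surrounding hunk, a minimal usage sketch; the volume, bucket, and key names are placeholders, and the ClientProtocol handle is assumed to come from an existing RpcClient.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

final class TrashApiExample {
  private TrashApiExample() { }

  static void listAndRecover(ClientProtocol proxy) throws IOException {
    // List up to 100 keys still held in the deleted table of vol1/bucket1.
    List<RepeatedOmKeyInfo> deleted =
        proxy.listTrash("vol1", "bucket1", null, "", 100);
    System.out.println("Keys in trash: " + deleted.size());

    // Attempt to restore one key back into the same bucket.
    boolean recovered =
        proxy.recoverTrash("vol1", "bucket1", "key1", "bucket1");
    System.out.println("Recovered key1: " + recovered);
  }
}
```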
+ * @throws IOException + */ + boolean recoverTrash(String volumeName, String bucketName, String keyName, + String destinationBucket) throws IOException; + /** * Get OzoneKey. * @param volumeName Name of the Volume diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index fe986640176..bfeb9c1e6c1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -34,7 +34,6 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.Syncable; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -124,6 +123,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -1771,6 +1771,25 @@ public List listKeys(String volumeName, String bucketName, } } + @Override + public List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, int maxKeys) throws IOException { + + Preconditions.checkNotNull(volumeName); + Preconditions.checkNotNull(bucketName); + + return ozoneManagerClient.listTrash(volumeName, bucketName, startKeyName, + keyPrefix, maxKeys); + } + + @Override + public boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) throws IOException { + + return ozoneManagerClient.recoverTrash(volumeName, bucketName, keyName, + destinationBucket); + } + @Override public OzoneKeyDetails getKeyDetails( String volumeName, String bucketName, String keyName) @@ -2165,6 +2184,8 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -2316,16 +2337,9 @@ public List listStatusLight(String volumeName, String bucketName, String keyName, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { OmKeyArgs keyArgs = prepareOmKeyArgs(volumeName, bucketName, keyName); - if (omVersion.compareTo(OzoneManagerVersion.LIGHTWEIGHT_LIST_STATUS) >= 0) { - return ozoneManagerClient.listStatusLight(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes); - } else { - return ozoneManagerClient.listStatus(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes) - .stream() - .map(OzoneFileStatusLight::fromOzoneFileStatus) - .collect(Collectors.toList()); - } + return ozoneManagerClient + .listStatusLight(keyArgs, recursive, startKey, numEntries, + allowPartialPrefixes); } /** @@ -2497,7 +2511,9 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey) private OzoneOutputStream 
createOutputStream(OpenKeySession openKey, KeyOutputStream keyOutputStream) throws IOException { - boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true); + boolean enableHsync = conf.getBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); keyOutputStream .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(), openKey.getOpenVersion()); @@ -2509,7 +2525,9 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, private OzoneOutputStream createSecureOutputStream(OpenKeySession openKey, OutputStream keyOutputStream, Syncable syncable) throws IOException { - boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true); + boolean enableHsync = conf.getBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); final FileEncryptionInfo feInfo = openKey.getKeyInfo().getFileEncryptionInfo(); if (feInfo != null) { @@ -2590,27 +2608,17 @@ public OzoneFsServerDefaults getServerDefaults() throws IOException { long now = Time.monotonicNow(); if ((serverDefaults == null) || (now - serverDefaultsLastUpdate > serverDefaultsValidityPeriod)) { - try { - for (ServiceInfo si : ozoneManagerClient.getServiceInfo() - .getServiceInfoList()) { - if (si.getServerDefaults() != null) { - serverDefaults = si.getServerDefaults(); - serverDefaultsLastUpdate = now; - break; - } - } - } catch (Exception e) { - LOG.warn("Could not get server defaults from OM.", e); - } + serverDefaults = ozoneManagerClient.getServerDefaults(); + serverDefaultsLastUpdate = now; } + assert serverDefaults != null; return serverDefaults; } @Override public URI getKeyProviderUri() throws IOException { - String keyProviderUri = (getServerDefaults() != null) ? - serverDefaults.getKeyProviderUri() : null; - return OzoneKMSUtil.getKeyProviderUri(ugi, null, keyProviderUri, conf); + return OzoneKMSUtil.getKeyProviderUri(ugi, + null, getServerDefaults().getKeyProviderUri(), conf); } @Override diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java index 361dcb1fd0a..5f2b80bdef6 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java @@ -71,8 +71,6 @@ private void init(boolean incrementalChunkList) throws IOException { ((InMemoryConfiguration)config).setFromObject(clientConfig); - ((InMemoryConfiguration) config).setBoolean( - OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); ((InMemoryConfiguration) config).setBoolean( OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java index c5985f82093..61ae0879f78 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java @@ -214,7 +214,7 @@ public String toString() { } /** - * Get the volume and bucket or mount name (non-key path). + * Get the volume & bucket or mount name (non-key path). * @return String of path excluding key in bucket. */ // Prepend a delimiter at beginning. e.g. 
/vol1/buc1 diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index d6320061253..11f176362a6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -119,7 +119,7 @@ public static InetSocketAddress getOmAddress(ConfigurationSource conf) { * Return list of OM addresses by service ids - when HA is enabled. * * @param conf {@link ConfigurationSource} - * @return {service.id -> [{@link InetSocketAddress}]} + * @return {service.id -> [{@link InetSocketAddress}]} */ public static Map> getOmHAAddressesById( ConfigurationSource conf) { @@ -243,10 +243,6 @@ public static boolean isReadOnly( case ListKeys: case ListKeysLight: case ListTrash: - // ListTrash is deprecated by HDDS-11251. Keeping this in here - // As protobuf currently doesn't support deprecating enum fields - // TODO: Remove once migrated to proto3 and mark fields in proto - // as deprecated case ServiceList: case ListOpenFiles: case ListMultiPartUploadParts: @@ -278,8 +274,7 @@ public static boolean isReadOnly( case SetSafeMode: case PrintCompactionLogDag: case GetSnapshotInfo: - case GetQuotaRepairStatus: - case StartQuotaRepair: + case GetServerDefaults: return true; case CreateVolume: case SetVolumeProperty: @@ -309,10 +304,6 @@ public static boolean isReadOnly( case AddAcl: case PurgeKeys: case RecoverTrash: - // RecoverTrash is deprecated by HDDS-11251. Keeping this in here - // As protobuf currently doesn't support deprecating enum fields - // TODO: Remove once migrated to proto3 and mark fields in proto - // as deprecated case FinalizeUpgrade: case Prepare: case CancelPrepare: @@ -332,7 +323,6 @@ public static boolean isReadOnly( case DeleteSnapshot: case RenameSnapshot: case SnapshotMoveDeletedKeys: - case SnapshotMoveTableKeys: case SnapshotPurge: case RecoverLease: case SetTimes: @@ -707,7 +697,7 @@ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyNa * Look at 'ozone.om.internal.service.id' first. If configured, return that. * If the above is not configured, look at 'ozone.om.service.ids'. * If count(ozone.om.service.ids) == 1, return that id. - * If count(ozone.om.service.ids) > 1 throw exception + * If count(ozone.om.service.ids) > 1 throw exception * If 'ozone.om.service.ids' is not configured, return null. (Non HA) * @param conf configuration * @return OM service ID. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index c7e20fb7e8b..8ffa3c45c09 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -27,7 +27,7 @@ /** * An {@link OutputStream} first write data to a buffer up to the capacity. - * Then, select {@code Underlying} by the number of bytes written. + * Then, select {@link Underlying} by the number of bytes written. * When {@link #flush()}, {@link #hflush()}, {@link #hsync()} * or {@link #close()} is invoked, * it will force flushing the buffer and {@link OutputStream} selection. 
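The SelectorOutputStream javadoc touched above describes a buffer-then-select pattern: data is buffered up to a capacity, and the real underlying stream is only chosen, by the number of bytes written, once the buffer fills or the stream is flushed or closed. A self-contained sketch of that pattern under simplified assumptions (no hflush/hsync, selection keyed only on buffered size); it is not the SelectorOutputStream implementation itself.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.function.Function;

final class BufferThenSelectStream extends OutputStream {
  private final int capacity;
  private final Function<Integer, OutputStream> selector;
  private ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  private OutputStream selected;

  BufferThenSelectStream(int capacity, Function<Integer, OutputStream> selector) {
    this.capacity = capacity;
    this.selector = selector;
  }

  @Override
  public void write(int b) throws IOException {
    if (selected == null && buffer.size() >= capacity) {
      select();                                // capacity reached: pick the real stream
    }
    (selected != null ? selected : buffer).write(b);
  }

  private void select() throws IOException {
    selected = selector.apply(buffer.size());  // choose underlying stream by buffered size
    buffer.writeTo(selected);                  // replay the buffered bytes into it
    buffer = null;
  }

  @Override
  public void flush() throws IOException {
    if (selected == null) {
      select();                                // flushing forces selection, as described above
    }
    selected.flush();
  }

  @Override
  public void close() throws IOException {
    if (selected == null) {
      select();
    }
    selected.close();
  }
}
```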
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index a77bc4f5304..0f3b55235be 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -73,9 +73,6 @@ private OMConfigKeys() { public static final String OZONE_OM_DECOMMISSIONED_NODES_KEY = "ozone.om.decommissioned.nodes"; - public static final String OZONE_OM_FEATURES_DISABLED = - "ozone.om.features.disabled"; - public static final String OZONE_OM_ADDRESS_KEY = "ozone.om.address"; public static final String OZONE_OM_BIND_HOST_DEFAULT = @@ -403,8 +400,6 @@ private OMConfigKeys() { /** * Configuration properties for Snapshot Directory Service. */ - public static final String OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED = "ozone.snapshot.deep.cleaning.enabled"; - public static final boolean OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT = false; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL = "ozone.snapshot.directory.service.interval"; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java index db00917dacc..ae238f1b45a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java @@ -80,7 +80,7 @@ T doUnderLock(String lockId, S3SecretFunction action) /** * Default implementation of secret check method. * @param kerberosId kerberos principal. - * @return true if exist associated s3 secret for given {@code kerberosId}, + * @return true if exist associated s3 secret for given {@param kerberosId}, * false if not. */ default boolean hasS3Secret(String kerberosId) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java index 8c3943d0fab..0bfd6922fee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java @@ -24,7 +24,7 @@ * Exception thrown by * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when * OM leader is not ready to serve requests. This error is thrown when Raft - * Server returns {@link org.apache.ratis.protocol.exceptions.LeaderNotReadyException}. + * Server returns {@link org.apache.ratis.protocol.LeaderNotReadyException}. */ public class OMLeaderNotReadyException extends IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 0507a27de61..f52a142239b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -360,6 +360,7 @@ public synchronized void appendNewBlocks( * @param updateTime if true, updates modification time. 
* @param keepOldVersions if false, old blocks won't be kept * and the new block versions will always be 0 + * @throws IOException */ public synchronized long addNewVersion( List newLocationList, boolean updateTime, @@ -627,7 +628,7 @@ public OmKeyInfo build() { /** * For network transmit. - * @return KeyInfo + * @return */ public KeyInfo getProtobuf(int clientVersion) { return getProtobuf(false, clientVersion); @@ -659,7 +660,7 @@ public KeyInfo getNetworkProtobuf(String fullKeyName, int clientVersion, /** * * @param ignorePipeline true for persist to DB, false for network transmit. - * @return KeyInfo + * @return */ public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) { return getProtobuf(ignorePipeline, null, clientVersion, false); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index bf4ffa9d8de..74effbd80a3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -18,15 +18,10 @@ package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.nio.file.Paths; import java.util.UUID; @@ -37,7 +32,6 @@ * Utility class for OzoneFileSystem. */ public final class OzoneFSUtils { - static final Logger LOG = LoggerFactory.getLogger(OzoneFSUtils.class); private OzoneFSUtils() { } @@ -298,31 +292,4 @@ public static Path trimPathToDepth(Path path, int maxDepth) { } return res; } - - /** - * Helper method to return whether Hsync can be enabled. - * And print warning when the config is ignored. - */ - public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) { - final String confKey = isClient ? - "ozone.client.hbase.enhancements.allowed" : - OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; - - boolean confHBaseEnhancementsAllowed = conf.getBoolean( - confKey, OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT); - - boolean confHsyncEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); - - if (confHBaseEnhancementsAllowed) { - return confHsyncEnabled; - } else { - if (confHsyncEnabled) { - LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. 
To enable it, set {} = true as well.", - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true, - confKey); - } - return false; - } - } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java index ed3d3ee25c2..6bab1025b13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.IdentityProvider; import org.apache.hadoop.ipc.Schedulable; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +43,7 @@ public OzoneIdentityProvider() { } /** - * If schedulable isn't instance of {@link org.apache.hadoop.ipc.Server.Call}, + * If schedulable isn't instance of {@link Server.Call}, * then trying to access getCallerContext() method, will * result in an exception. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index f1dd1e9eeba..24c172ef8fd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -34,7 +34,7 @@ /** * Args for deleted keys. This is written to om metadata deletedTable. * Once a key is deleted, it is moved to om metadata deletedTable. Having a - * label: {@code List} ensures that if users create and delete keys with + * {label: List} ensures that if users create & delete keys with * exact same uri multiple times, all the delete instances are bundled under * the same key name. This is useful as part of GDPR compliance where an * admin wants to confirm if a given key is deleted from deletedTable metadata. @@ -110,7 +110,9 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo } /** - * @param compact true for persistence, false for network transmit + * + * @param compact, true for persistence, false for network transmit + * @return */ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { List list = new ArrayList<>(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index 5dbe3487e19..c8bdbf43c42 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -25,7 +25,6 @@ import java.util.Map; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; @@ -60,7 +59,6 @@ public final class ServiceInfo { private Map ports; private OMRoleInfo omRoleInfo; - private OzoneFsServerDefaults serverDefaults; /** * Default constructor for JSON deserialization. 
@@ -78,24 +76,6 @@ private ServiceInfo(NodeType nodeType, List portList, OzoneManagerVersion omVersion, OMRoleInfo omRole) { - this(nodeType, hostname, portList, omVersion, omRole, null); - } - - /** - * Constructs the ServiceInfo for the {@code nodeType}. - * @param nodeType type of node/service - * @param hostname hostname of the service - * @param portList list of ports the service listens to - * @param omVersion Om Version - * @param omRole OM role Ino - * @param keyProviderUri KMS provider URI - */ - private ServiceInfo(NodeType nodeType, - String hostname, - List portList, - OzoneManagerVersion omVersion, - OMRoleInfo omRole, - OzoneFsServerDefaults serverDefaults) { Preconditions.checkNotNull(nodeType); Preconditions.checkNotNull(hostname); this.nodeType = nodeType; @@ -106,7 +86,6 @@ private ServiceInfo(NodeType nodeType, ports.put(port.getType(), port.getValue()); } this.omRoleInfo = omRole; - this.serverDefaults = serverDefaults; } /** @@ -164,15 +143,6 @@ public OMRoleInfo getOmRoleInfo() { return omRoleInfo; } - /** - * Returns the Ozone Server default configuration. - * @return OmRoleInfo - */ - @JsonIgnore - public OzoneFsServerDefaults getServerDefaults() { - return serverDefaults; - } - /** * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. * @@ -200,9 +170,6 @@ public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { if (nodeType == NodeType.OM && omRoleInfo != null) { builder.setOmRole(omRoleInfo); } - if (serverDefaults != null) { - builder.setServerDefaults(serverDefaults.getProtobuf()); - } return builder.build(); } @@ -218,9 +185,7 @@ public static ServiceInfo getFromProtobuf( serviceInfo.getHostname(), serviceInfo.getServicePortsList(), OzoneManagerVersion.fromProtoValue(serviceInfo.getOMVersion()), - serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null, - serviceInfo.hasServerDefaults() ? OzoneFsServerDefaults.getFromProtobuf( - serviceInfo.getServerDefaults()) : null); + serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null); } /** @@ -241,7 +206,6 @@ public static class Builder { private List portList = new ArrayList<>(); private OMRoleInfo omRoleInfo; private OzoneManagerVersion omVersion; - private OzoneFsServerDefaults serverDefaults; /** * Gets the Om Client Protocol Version. @@ -295,11 +259,6 @@ public Builder setOmRoleInfo(OMRoleInfo omRole) { return this; } - public Builder setServerDefaults(OzoneFsServerDefaults defaults) { - serverDefaults = defaults; - return this; - } - /** * Builds and returns {@link ServiceInfo} with the set values. 
* @return {@link ServiceInfo} @@ -309,8 +268,7 @@ public ServiceInfo build() { host, portList, omVersion, - omRoleInfo, - serverDefaults); + omRoleInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 7feefdb0b22..47a48c37e8e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -19,7 +19,6 @@ */ import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ByteString; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -52,7 +51,7 @@ * Each snapshot created has an associated SnapshotInfo entry * containing the snapshotId, snapshot path, * snapshot checkpoint directory, previous snapshotId - * for the snapshot path and global amongst other necessary fields. + * for the snapshot path & global amongst other necessary fields. */ public final class SnapshotInfo implements Auditable, CopyObject { private static final Codec CODEC = new DelegatedCodec<>( @@ -125,7 +124,6 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; - private ByteString lastTransactionInfo; private SnapshotInfo(Builder b) { this.snapshotId = b.snapshotId; @@ -147,7 +145,6 @@ private SnapshotInfo(Builder b) { this.exclusiveSize = b.exclusiveSize; this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; - this.lastTransactionInfo = b.lastTransactionInfo; } public void setName(String name) { @@ -264,15 +261,13 @@ public SnapshotInfo.Builder toBuilder() { .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) - .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir) - .setLastTransactionInfo(lastTransactionInfo); + .setDeepCleanedDeletedDir(deepCleanedDeletedDir); } /** @@ -298,7 +293,6 @@ public static class Builder { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; - private ByteString lastTransactionInfo; public Builder() { // default values @@ -417,11 +411,6 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { return this; } - public Builder setLastTransactionInfo(ByteString lastTransactionInfo) { - this.lastTransactionInfo = lastTransactionInfo; - return this; - } - public SnapshotInfo build() { Preconditions.checkNotNull(name); return new SnapshotInfo(this); @@ -456,10 +445,6 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { sib.setGlobalPreviousSnapshotID(toProtobuf(globalPreviousSnapshotId)); } - if (lastTransactionInfo != null) { - sib.setLastTransactionInfo(lastTransactionInfo); - } - sib.setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) @@ -528,10 +513,6 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getDeepCleanedDeletedDir()); } - if 
(snapshotInfoProto.hasLastTransactionInfo()) { - osib.setLastTransactionInfo(snapshotInfoProto.getLastTransactionInfo()); - } - osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); @@ -624,14 +605,6 @@ public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { this.deepCleanedDeletedDir = deepCleanedDeletedDir; } - public ByteString getLastTransactionInfo() { - return lastTransactionInfo; - } - - public void setLastTransactionInfo(ByteString lastTransactionInfo) { - this.lastTransactionInfo = lastTransactionInfo; - } - /** * Generate default name of snapshot, (used if user doesn't provide one). */ @@ -700,8 +673,7 @@ public boolean equals(Object o) { referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && exclusiveReplicatedSize == that.exclusiveReplicatedSize && - deepCleanedDeletedDir == that.deepCleanedDeletedDir && - Objects.equals(lastTransactionInfo, that.lastTransactionInfo); + deepCleanedDeletedDir == that.deepCleanedDeletedDir; } @Override @@ -712,7 +684,7 @@ public int hashCode() { globalPreviousSnapshotId, snapshotPath, checkpointDir, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, - exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo); + exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir); } /** @@ -720,7 +692,27 @@ public int hashCode() { */ @Override public SnapshotInfo copyObject() { - return this.toBuilder().build(); + return new Builder() + .setSnapshotId(snapshotId) + .setName(name) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotStatus(snapshotStatus) + .setCreationTime(creationTime) + .setDeletionTime(deletionTime) + .setPathPreviousSnapshotId(pathPreviousSnapshotId) + .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) + .setSnapshotPath(snapshotPath) + .setCheckpointDir(checkpointDir) + .setDbTxSequenceNumber(dbTxSequenceNumber) + .setDeepClean(deepClean) + .setSstFiltered(sstFiltered) + .setReferencedSize(referencedSize) + .setReferencedReplicatedSize(referencedReplicatedSize) + .setExclusiveSize(exclusiveSize) + .setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setDeepCleanedDeletedDir(deepCleanedDeletedDir) + .build(); } @Override @@ -745,7 +737,6 @@ public String toString() { ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + - ", lastTransactionInfo: '" + lastTransactionInfo + '\'' + '}'; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java index a715bfbc153..753d528cb05 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java @@ -57,7 +57,7 @@ public interface AccountNameSpace { * Get Space Usage Information for this AccountNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. - * @return SpaceUsage + * @return */ SpaceUsageSource getSpaceUsage(); @@ -71,7 +71,7 @@ public interface AccountNameSpace { /** * Get Quota Information for this AccountNameSpace. 
- * @return OzoneQuota + * @return */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java index d5ecf7bba80..1481f1b466b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java @@ -74,7 +74,7 @@ public interface BucketNameSpace { * Get Space Usage Information for this BucketNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. - * @return SpaceUsageSource + * @return */ SpaceUsageSource getSpaceUsage(); @@ -88,7 +88,7 @@ public interface BucketNameSpace { /** * Get Quota Information for this BucketNameSpace. - * @return OzoneQuota + * @return */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 94822630f8e..45922c107cb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -55,6 +56,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -1053,6 +1055,39 @@ DBUpdates getDBUpdates( OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest) throws IOException; + /** + * List trash allows the user to list the keys that were marked as deleted, + * but not actually deleted by Ozone Manager. This allows a user to recover + * keys within a configurable window. + * @param volumeName - The volume name, which can also be a wild card + * using '*'. + * @param bucketName - The bucket name, which can also be a wild card + * using '*'. + * @param startKeyName - List keys from a specific key name. + * @param keyPrefix - List keys using a specific prefix. + * @param maxKeys - The number of keys to be returned. This must be below + * the cluster level set by admins. + * @return The list of keys that are deleted from the deleted table. + * @throws IOException + */ + List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, int maxKeys) throws IOException; + + /** + * Recover trash allows the user to recover keys that were marked as deleted, + * but not actually deleted by Ozone Manager. + * @param volumeName - The volume name. + * @param bucketName - The bucket name. + * @param keyName - The key user want to recover. + * @param destinationBucket - The bucket user want to recover to. 
+ * @return The result of recovering operation is success or not. + * @throws IOException + */ + default boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) throws IOException { + return false; + } + /** * * @param txnApplyWaitTimeoutSeconds Max time in SECONDS to wait for all @@ -1061,7 +1096,7 @@ DBUpdates getDBUpdates( * @param txnApplyCheckIntervalSeconds Time in SECONDS to wait between * successive checks for all transactions * to be applied to the OM DB. - * @return {@code long} + * @return */ default long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) @@ -1146,15 +1181,10 @@ boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException; /** - * Get status of last triggered quota repair in OM. - * @return String - * @throws IOException - */ - String getQuotaRepairStatus() throws IOException; - - /** - * start quota repair in OM. + * Get server default configurations. + * + * @return OzoneFsServerDefaults some default configurations from server. * @throws IOException */ - void startQuotaRepair(List buckets) throws IOException; + OzoneFsServerDefaults getServerDefaults() throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index b140cf95e69..f70beed5f25 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; @@ -71,6 +72,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -148,6 +150,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; @@ -178,6 +182,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; @@ -192,6 +198,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; @@ -2114,8 +2122,12 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { .setGetFileStatusRequest(req) .build(); - final GetFileStatusResponse resp = handleError(submitRequest(omRequest)) - .getGetFileStatusResponse(); + final GetFileStatusResponse resp; + try { + resp = handleError(submitRequest(omRequest)).getGetFileStatusResponse(); + } catch (IOException e) { + throw e; + } return OzoneFileStatus.getFromProtobuf(resp.getStatus()); } @@ -2430,6 +2442,85 @@ public List listStatus(OmKeyArgs args, boolean recursive, return listStatus(args, recursive, startKey, numEntries, false); } + @Override + public List listTrash(String volumeName, + String bucketName, String startKeyName, String keyPrefix, int maxKeys) + throws IOException { + + Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), + "The volume name cannot be null or " + + "empty. Please enter a valid volume name or use '*' as a wild card"); + + Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), + "The bucket name cannot be null or " + + "empty. Please enter a valid bucket name or use '*' as a wild card"); + + ListTrashRequest trashRequest = ListTrashRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setStartKeyName(startKeyName) + .setKeyPrefix(keyPrefix) + .setMaxKeys(maxKeys) + .build(); + + OMRequest omRequest = createOMRequest(Type.ListTrash) + .setListTrashRequest(trashRequest) + .build(); + + ListTrashResponse trashResponse = + handleError(submitRequest(omRequest)).getListTrashResponse(); + + List deletedKeyList = + new ArrayList<>(trashResponse.getDeletedKeysCount()); + + List list = new ArrayList<>(); + for (OzoneManagerProtocolProtos.RepeatedKeyInfo + repeatedKeyInfo : trashResponse.getDeletedKeysList()) { + RepeatedOmKeyInfo fromProto = + RepeatedOmKeyInfo.getFromProto(repeatedKeyInfo); + list.add(fromProto); + } + deletedKeyList.addAll(list); + + return deletedKeyList; + } + + @Override + public boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) throws IOException { + + Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), + "The volume name cannot be null or empty. 
" + + "Please enter a valid volume name."); + + Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), + "The bucket name cannot be null or empty. " + + "Please enter a valid bucket name."); + + Preconditions.checkArgument(Strings.isNullOrEmpty(keyName), + "The key name cannot be null or empty. " + + "Please enter a valid key name."); + + Preconditions.checkArgument(Strings.isNullOrEmpty(destinationBucket), + "The destination bucket name cannot be null or empty. " + + "Please enter a valid destination bucket name."); + + RecoverTrashRequest.Builder req = RecoverTrashRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDestinationBucket(destinationBucket); + + OMRequest omRequest = createOMRequest(Type.RecoverTrash) + .setRecoverTrashRequest(req) + .build(); + + RecoverTrashResponse recoverResponse = + handleError(submitRequest(omRequest)).getRecoverTrashResponse(); + + return recoverResponse.getResponse(); + } + @Override public long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) @@ -2557,27 +2648,19 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override - public String getQuotaRepairStatus() throws IOException { - OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest quotaRepairStatusRequest = - OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest.newBuilder() - .build(); + public OzoneFsServerDefaults getServerDefaults() + throws IOException { + ServerDefaultsRequest serverDefaultsRequest = + ServerDefaultsRequest.newBuilder().build(); - OMRequest omRequest = createOMRequest(Type.GetQuotaRepairStatus) - .setGetQuotaRepairStatusRequest(quotaRepairStatusRequest).build(); + OMRequest omRequest = createOMRequest(Type.GetServerDefaults) + .setServerDefaultsRequest(serverDefaultsRequest).build(); - OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusResponse - = handleError(submitRequest(omRequest)).getGetQuotaRepairStatusResponse(); - return quotaRepairStatusResponse.getStatus(); - } + ServerDefaultsResponse serverDefaultsResponse = + handleError(submitRequest(omRequest)).getServerDefaultsResponse(); - @Override - public void startQuotaRepair(List buckets) throws IOException { - OzoneManagerProtocolProtos.StartQuotaRepairRequest startQuotaRepairRequest = - OzoneManagerProtocolProtos.StartQuotaRepairRequest.newBuilder() - .build(); - OMRequest omRequest = createOMRequest(Type.StartQuotaRepair) - .setStartQuotaRepairRequest(startQuotaRepairRequest).build(); - handleError(submitRequest(omRequest)); + return OzoneFsServerDefaults.getFromProtobuf( + serverDefaultsResponse.getServerDefaults()); } private SafeMode toProtoBuf(SafeModeAction action) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index e28c9477f29..ccb2080a875 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -242,13 +242,11 @@ public static MD5MD5Crc32FileChecksumProto convert( DataOutputBuffer buf = new DataOutputBuffer(); checksum.write(buf); byte[] bytes = buf.getData(); - int bytesPerCRC; - long crcPerBlock; - try (DataInputBuffer buffer = new DataInputBuffer()) { - buffer.reset(bytes, 0, bytes.length); - bytesPerCRC = buffer.readInt(); - crcPerBlock = buffer.readLong(); - } + DataInputBuffer buffer 
= new DataInputBuffer(); + buffer.reset(bytes, 0, bytes.length); + int bytesPerCRC = buffer.readInt(); + long crcPerBlock = buffer.readLong(); + buffer.close(); int offset = Integer.BYTES + Long.BYTES; ByteString byteString = ByteString.copyFrom( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java index abd4cd6f6d2..1f105a03ad4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -19,7 +19,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; /** - * No-op implementation for {@link IAccessAuthorizer}, allows everything. + * Default implementation for {@link IAccessAuthorizer}. * */ public class OzoneAccessAuthorizer implements IAccessAuthorizer { @@ -35,9 +35,4 @@ public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) throws OMException { return true; } - - @Override - public boolean isNative() { - return true; - } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index e1f1f3a8c1e..ca32c96855d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -24,7 +24,7 @@ /** * Class representing an ozone object. - * It can be a volume with non-null volumeName {@literal (bucketName=null & name=null)} + * It can be a volume with non-null volumeName (bucketName=null & name=null) * or a bucket with non-null volumeName and bucketName (name=null) * or a key with non-null volumeName, bucketName and key name * (via getKeyName) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java index f8363af3751..84ad208cf93 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java @@ -18,13 +18,8 @@ package org.apache.hadoop.ozone.om.helpers; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -43,29 +38,4 @@ public void testPaths() { assertFalse(OzoneFSUtils.isValidName("/a:/b")); assertFalse(OzoneFSUtils.isValidName("/a//b")); } - - /** - * In these scenarios below, OzoneFSUtils.canEnableHsync() should return false: - * 1. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = false - * 2. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = true - * 3. 
ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = false - *
    - * The only case where OzoneFSUtils.canEnableHsync() would return true: - * 4. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = true - */ - @ParameterizedTest - @CsvSource({"false,false,false,false", "false,false,true,false", "false,true,false,false", "true,true,true,false", - "false,false,false,true", "false,false,true,true", "false,true,false,true", "true,true,true,true"}) - void testCanEnableHsync(boolean canEnableHsync, - boolean hbaseEnhancementsEnabled, boolean fsHsyncEnabled, - boolean isClient) { - OzoneConfiguration conf = new OzoneConfiguration(); - final String confKey = isClient ? - "ozone.client.hbase.enhancements.allowed" : - OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; - conf.setBoolean(confKey, hbaseEnhancementsEnabled); - conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, fsHsyncEnabled); - - assertEquals(canEnableHsync, OzoneFSUtils.canEnableHsync(conf, isClient)); - } } diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml index 3450b387393..3c97d3add76 100644 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml @@ -67,13 +67,6 @@ dev-support true - - **/.classpath - **/.project - **/.settings - **/*.iml - **/target/** - hadoop-hdds diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json index 72325cba080..827e2f04e10 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json @@ -1,34 +1,4 @@ { - "__inputs": [ - { - "label": "prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "11.1.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { @@ -50,1763 +20,1284 @@ "liveNow": false, "panels": [ { - "collapsed": false, + "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, - "id": 2, - "panels": [], - "title": "OM API Metrics", - "type": "row" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 49, - "options": { - "legend": { - "calcs": [], - 
"displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ + "id": 19, + "panels": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Rate of Key Reads ", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. of keys", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] } - ] + }, + "overrides": [] }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 48, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "timezone": [ - "browser" - ], - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 1 }, - "editorMode": "builder", - "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time", - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A" - } - ], - "title": "Read Key Info Latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - 
"pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 9 - }, - "id": 53, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_estimate_num_keys", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_estimate_num_keys", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_estimate_num_keys", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb metrics (no. 
of keys)", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Rate of Get Key Info ACL Checks", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "cache used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] } - ] + }, + "overrides": [] }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 - }, - "id": 52, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 1 }, - "editorMode": "code", - "expr": "om_performance_metrics_get_key_info_acl_check_latency_ns_avg_time", - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A" - } - ], - "title": "Get Key Info ACL check latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": 
"auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 17 - }, - "id": 51, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(om_performance_metrics_check_access_latency_ns_num_ops[1m])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Rate of Check Access", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_block_cache_usage", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_block_cache_usage", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 17 - }, - "id": 50, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + ], + "title": "Rocksdb block cache usage metrics", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "editorMode": "code", - "expr": "om_performance_metrics_check_access_latency_ns_avg_time", - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A" - } - ], - "title": "OM Check Access Latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - 
"defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. of files", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] } - ] + }, + "overrides": [] }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 25 - }, - "id": 55, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 1 }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(om_performance_metrics_get_key_info_resolve_bucket_latency_ns_num_ops[1m])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Resolve Bucket Latency rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ns" - }, - 
"overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 25 - }, - "id": 54, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_num_files_at_level0", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_num_files_at_level0", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb level0 metrics (num files)", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "editorMode": "code", - "expr": "om_performance_metrics_get_key_info_resolve_bucket_latency_ns_avg_time", - "instant": false, - "legendFormat": "{{hostname}}", - "range": true, - "refId": "A" - } - ], - "title": "Resolve Bucket Latency for Get Key Info", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. 
of keys", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] } - ] + }, + "overrides": [] }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 33 - }, - "id": 56, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 10 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Rocksdb no. of db key metrics", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "editorMode": "code", - "expr": "{__name__=~\"om_lock.*avg.*\"}", - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A" - } - ], - "title": "OM Locking Metrics", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 41 - }, - "id": 19, - "panels": [], - "title": "OM Rocksdb Metrics", - "type": "row" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of keys", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 10 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_cur_size_all_mem_tables", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_size_all_mem_tables", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb mem table metrics (size)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + 
"color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 10 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_estimate_table_readers_mem", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 42 - }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_estimate_num_keys", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_estimate_num_keys", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false + ], + "title": "Rocksdb om db table readers mem metrics", + "type": "timeseries" }, { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_estimate_num_keys", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb metrics (no. 
of keys)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "cache used", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 8, - "y": 42 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" + }, + "overrides": [] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_block_cache_usage", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 19 }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_block_cache_usage", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Rocksdb block cache usage metrics", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of files", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 16, - "y": 42 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_num_files_at_level0", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_num_files_at_level0", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_live_sst_files_size", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_live_sst_files_size", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb live sst file size metrics", + "type": "timeseries" } ], - "title": "Rocksdb level0 metrics (num files)", - "type": "timeseries" + "title": "OM Rocksdb Metrics", + "type": "row" }, { - "datasource": { - "type": "prometheus" 
- }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. of keys", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, + "collapsed": true, "gridPos": { - "h": 9, - "w": 8, + "h": 1, + "w": 24, "x": 0, - "y": 51 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "y": 1 }, - "targets": [ + "id": 20, + "panels": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. of ops", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 2 }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Rocksdb no. 
of db key metrics", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 8, - "y": 51 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Ugi Metrics (no. of ops)", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "time (ns)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_cur_size_all_mem_tables", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 2 + }, + "id": 15, + "options": { + 
"legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_size_all_mem_tables", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Ugi Metrics (avg. time)", + "type": "timeseries" } ], - "title": "Rocksdb mem table metrics (size)", - "type": "timeseries" + "title": "OM Ugi Metrics", + "type": "row" }, { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, + "collapsed": true, "gridPos": { - "h": 9, - "w": 8, - "x": 16, - "y": 51 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "h": 1, + "w": 24, + "x": 0, + "y": 2 }, - "targets": [ + "id": 2, + "panels": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_estimate_table_readers_mem", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no of keys", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "disableTextWrap": false, 
- "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 163 }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb om db table readers mem metrics", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "om_metrics_num_keys", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 60 - }, - "id": 12, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + ], + "title": "OM num key metrics", + "type": "timeseries" }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_live_sst_files_size", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. 
of ops", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_live_sst_files_size", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 163 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "om_metrics_num_key_ops", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "OM num key ops metrics", + "type": "timeseries" } ], - "title": "Rocksdb live sst file size metrics", - "type": "timeseries" + "title": "OM Num Key Metrics", + "type": "row" }, { "collapsed": true, @@ -1814,9 +1305,9 @@ "h": 1, "w": 24, "x": 0, - "y": 69 + "y": 3 }, - "id": 20, + "id": 21, "panels": [ { "datasource": { @@ -1828,10 +1319,9 @@ "mode": "palette-classic" }, "custom": { - "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", - "axisLabel": "no. of ops", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -1879,9 +1369,9 @@ "h": 8, "w": 8, "x": 0, - "y": 83 + "y": 164 }, - "id": 16, + "id": 1, "options": { "legend": { "calcs": [], @@ -1897,13 +1387,13 @@ "targets": [ { "datasource": { - "ype": "prometheus" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}[1m])", + "expr": "om_metrics_num_get_service_lists", "fullMetaSearch": false, - "includeNullMetadata": false, + "includeNullMetadata": true, "instant": false, "legendFormat": "{{__name__}}, {{hostname}}", "range": true, @@ -1911,9 +1401,23 @@ "useBackend": false } ], - "title": "Ugi Metrics (no. 
of ops)", + "title": "Get service lists metrics", "type": "timeseries" - }, + } + ], + "title": "OM Service Lists Metrics", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 22, + "panels": [ { "datasource": { "type": "prometheus" @@ -1924,7 +1428,6 @@ "mode": "palette-classic" }, "custom": { - "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "time (ns)", @@ -1975,10 +1478,10 @@ "gridPos": { "h": 8, "w": 8, - "x": 8, - "y": 83 + "x": 0, + "y": 5 }, - "id": 15, + "id": 3, "options": { "legend": { "calcs": [], @@ -1998,7 +1501,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}", + "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -2008,138 +1511,137 @@ "useBackend": false } ], - "title": "Ugi Metrics (avg. time)", + "title": "Read key info (avg time) metrics", "type": "timeseries" } ], - "title": "OM Ugi Metrics", + "title": "OM Read Key Info Metrics", "type": "row" }, { - "collapsed": false, + "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 70 + "y": 5 }, "id": 23, - "panels": [], - "title": "OM Table Cache Metrics", - "type": "row" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 71 - }, - "id": 14, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ + "panels": [ { "datasource": { "type": "prometheus" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "table_cache_metrics_hit_count{instance=~\".*:9875|.+9876\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": 
"A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 174 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "table_cache_metrics_miss_count{instance=~\".*:9875|.+9876\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", - "range": true, - "refId": "B", - "useBackend": false + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "table_cache_metrics_hit_count{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "table_cache_metrics_miss_count{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Table cache metrics (count)", + "type": "timeseries" } ], - "title": "Table cache metrics (count)", - "type": "timeseries" + "title": "OM Table Cache Metrics", + "type": "row" }, { "collapsed": true, @@ -2147,7 +1649,7 @@ "h": 1, "w": 24, "x": 0, - "y": 79 + "y": 6 }, "id": 9, "panels": [ @@ -2212,7 +1714,7 @@ "h": 9, "w": 8, "x": 0, - "y": 111 + "y": 47 }, "id": 17, "options": { @@ -2355,7 +1857,7 @@ "h": 9, "w": 8, "x": 8, - "y": 111 + "y": 47 }, "id": 18, "options": { @@ -2448,7 +1950,7 @@ "h": 1, "w": 24, "x": 0, - "y": 80 + "y": 7 }, "id": 24, "panels": [ @@ -2512,7 +2014,7 @@ "h": 9, "w": 8, "x": 0, - "y": 210 + "y": 146 }, "id": 26, "options": { @@ -2623,7 +2125,7 @@ "h": 9, "w": 8, "x": 8, - "y": 210 + "y": 146 }, "id": 27, "options": { @@ -2668,7 +2170,7 @@ "h": 1, "w": 24, "x": 0, - "y": 81 + "y": 8 }, "id": 25, "panels": [ @@ -2732,7 +2234,7 @@ "h": 9, "w": 8, "x": 0, - "y": 202 + "y": 138 }, "id": 30, "options": { @@ -2777,7 +2279,7 @@ "h": 1, "w": 24, "x": 0, - "y": 82 + "y": 10 }, "id": 29, "panels": [ @@ -2841,7 +2343,7 @@ "h": 8, "w": 8, "x": 0, - "y": 107 + "y": 43 }, "id": 36, "options": { @@ -2936,7 +2438,7 @@ "h": 8, "w": 8, "x": 8, - "y": 107 + "y": 43 }, "id": 37, "options": { @@ -2981,7 +2483,7 @@ "h": 1, "w": 24, "x": 0, - "y": 83 + "y": 11 }, "id": 38, "panels": [ @@ -2995,7 +2497,6 @@ "mode": "palette-classic" }, "custom": { - "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "time (ns)", @@ -3047,7 +2548,7 @@ "h": 8, "w": 8, "x": 0, - "y": 73 + "y": 44 }, "id": 39, "options": { @@ -3108,7 +2609,6 @@ "mode": "palette-classic" }, "custom": { - "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "no. 
of ops", @@ -3159,7 +2659,7 @@ "h": 8, "w": 8, "x": 8, - "y": 73 + "y": 44 }, "id": 40, "options": { @@ -3204,7 +2704,7 @@ "h": 1, "w": 24, "x": 0, - "y": 84 + "y": 12 }, "id": 42, "panels": [ @@ -3268,7 +2768,7 @@ "h": 8, "w": 8, "x": 0, - "y": 85 + "y": 21 }, "id": 41, "options": { @@ -3363,7 +2863,7 @@ "h": 8, "w": 8, "x": 8, - "y": 85 + "y": 21 }, "id": 43, "options": { @@ -3458,7 +2958,7 @@ "h": 8, "w": 8, "x": 16, - "y": 85 + "y": 21 }, "id": 44, "options": { @@ -3503,7 +3003,7 @@ "h": 1, "w": 24, "x": 0, - "y": 85 + "y": 13 }, "id": 45, "panels": [ @@ -3567,7 +3067,7 @@ "h": 8, "w": 8, "x": 0, - "y": 78 + "y": 14 }, "id": 46, "options": { @@ -3662,7 +3162,7 @@ "h": 8, "w": 8, "x": 8, - "y": 78 + "y": 14 }, "id": 47, "options": { @@ -3703,7 +3203,8 @@ } ], "refresh": "", - "schemaVersion": 39, + "schemaVersion": 38, + "style": "dark", "tags": [], "templating": { "list": [] @@ -3715,7 +3216,6 @@ "timepicker": {}, "timezone": "", "title": "Read Key Dashboard", - "uid": "edu3g1mx0be2oc", - "version": 29, + "version": 21, "weekStart": "" } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh index e8032068465..554b22b5a39 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh @@ -94,9 +94,10 @@ EOF # Some tests are skipped due to known issues. # - ITestS3AContractDistCp: HDDS-10616 + # - ITestS3AContractGetFileStatusV1List: HDDS-10617 # - ITestS3AContractRename: HDDS-10665 mvn -B -V --fail-never --no-transfer-progress \ - -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \ + -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \ clean test local target="${RESULT_DIR}/junit/${bucket}/target" diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index d3984110d8d..a5727d2b1e4 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -31,7 +31,6 @@ OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.om.features.disabled=ATOMIC_REWRITE_KEY HADOOP_OPTS="-Dhadoop.opts=test" HDFS_STORAGECONTAINERMANAGER_OPTS="-Dhdfs.scm.opts=test" diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 38cc5b71a18..db517a7f7c6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -33,7 +33,6 @@ OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice -OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index 863e1d0b75a..cb76257cd8d 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ 
b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -33,7 +33,7 @@ source "$SCRIPT_DIR"/testlib.sh if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.ozone.test.JacocoServer & DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}') - export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*:org.apache.ozone.*:org.hadoop.ozone.*" + export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" fi cd "$SCRIPT_DIR" diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index a1b6da80c4b..909b72852aa 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -40,7 +40,6 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.fs.hsync.enabled=true # If SCM sends container close commands as part of upgrade finalization while # datanodes are doing a leader election, all 3 replicas may end up in the diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config index 88126ddf2cb..95ce6c0c9c5 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config @@ -37,7 +37,6 @@ OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.fs.hsync.enabled=true OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config index 77fa2b40ee4..1b805c98960 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config @@ -32,7 +32,6 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.fs.hsync.enabled=true OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh index d2718d04b7d..69af73f50c9 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh @@ -51,12 +51,12 @@ create_data_dirs() { # be used. ## Else, a binary image will be used. 
prepare_for_image() { - local image_version="${1}" + local image_version="$1" if [[ "$image_version" = "$OZONE_CURRENT_VERSION" ]]; then prepare_for_runner_image else - prepare_for_binary_image "${image_version}-rocky" + prepare_for_binary_image "$image_version" fi } diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh deleted file mode 100644 index ec64d5dcd54..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source "$TEST_DIR"/testlib.sh - -with_this_version_pre_finalized() { - # New layout features were added in this version, so OM and SCM should be pre-finalized. - execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include pre-finalized upgrade/check-finalization.robot - # Test that HSync is disabled when pre-finalized. 
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" --include pre-finalized-hsync-tests hsync/upgrade-hsync-check.robot -} - -with_this_version_finalized() { - execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include finalized upgrade/check-finalization.robot - execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" debug/ozone-debug-lease-recovery.robot - execute_robot_test "$SCM" -N "${OUTPUT_NAME}-freon-hsync" freon/hsync.robot -} diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env index a673b7f4655..140975d4bd0 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/.env +++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env @@ -17,5 +17,3 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -HADOOP_VERSION=${hadoop.version} -OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index eda14353688..0bf0f619bd7 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -16,49 +16,39 @@ services: old_client_1_0_0: - image: apache/ozone:1.0.0-rocky + image: apache/ozone:1.0.0 env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_1_0: - image: apache/ozone:1.1.0-rocky + image: apache/ozone:1.1.0 env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_2_1: - image: apache/ozone:1.2.1-rocky + image: apache/ozone:1.2.1 env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_3_0: - image: apache/ozone:1.3.0-rocky + image: apache/ozone:1.3.0 env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_4_0: - image: apache/ozone:1.4.0-rocky + image: apache/ozone:1.4.0 env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] new_client: image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} @@ -66,8 +56,6 @@ services: - docker-config volumes: - ../..:/opt/hadoop - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf environment: OZONE_OPTS: command: ["sleep","1000000"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config index 1a61aaf4f7e..85099f902d3 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config +++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CORE-SITE.XML_fs.defaultFS=ofs://om -CORE-SITE.XML_fs.trash.interval=1 CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem OZONE-SITE.XML_hdds.datanode.dir=/data/hdds @@ -24,7 +22,6 @@ OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_ozone.server.default.replication=3 @@ -34,98 +31,9 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.default.bucket.layout=LEGACY OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http - -OZONE-SITE.XML_hdds.block.token.enabled=true -OZONE-SITE.XML_hdds.container.token.enabled=true -OZONE-SITE.XML_hdds.grpc.tls.enabled=true - -OZONE-SITE.XML_ozone.security.enabled=true -OZONE-SITE.XML_ozone.acl.enabled=true -OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer -OZONE-SITE.XML_ozone.administrators="testuser,recon,om" -OZONE-SITE.XML_ozone.s3.administrators="testuser,recon,om" -OZONE-SITE.XML_ozone.recon.administrators="testuser2" -OZONE-SITE.XML_ozone.s3.administrators="testuser,s3g" - -HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 -HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 -CORE-SITE.XML_dfs.data.transfer.protection=authentication -CORE-SITE.XML_hadoop.security.authentication=kerberos -CORE-SITE.XML_hadoop.security.auth_to_local="DEFAULT" -CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms - -OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab -OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab -OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab -OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM - -OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab -OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM - -OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab -OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM - -HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM -HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab -HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab -HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM -HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab - -OZONE-SITE.XML_ozone.security.http.kerberos.enabled=true -OZONE-SITE.XML_ozone.s3g.secret.http.enabled=true -OZONE-SITE.XML_ozone.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer - -OZONE-SITE.XML_ozone.om.http.auth.type=kerberos -OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos 
-OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos -OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos -OZONE-SITE.XML_ozone.s3g.secret.http.auth.type=kerberos -OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos -OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos - -OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.http.auth.kerberos.keytab=/etc/security/keytabs/scm.keytab -OZONE-SITE.XML_ozone.om.http.auth.kerberos.principal=HTTP/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.http.auth.kerberos.keytab=/etc/security/keytabs/om.keytab -OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/dn@EXAMPLE.COM -OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/dn.keytab -OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.keytab -OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM -OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab -OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM -OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=* -OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab - -CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false -CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret -CORE-SITE.XML_hadoop.http.authentication.type=kerberos -CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM -CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab - -CORE-SITE.XML_hadoop.security.authorization=true -HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* -HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=* - -KMS-SITE.XML_hadoop.kms.proxyuser.s3g.users=* -KMS-SITE.XML_hadoop.kms.proxyuser.s3g.groups=* -KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=* - -OZONE_DATANODE_SECURE_USER=root -JSVC_HOME=/usr/bin - -OZONE_LOG_DIR=/var/log/hadoop - -no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1 +no_proxy=om,recon,scm,s3g,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf deleted file mode 100644 index eefc5b9c685..00000000000 --- a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -[logging] -default = FILE:/var/log/krb5libs.log -kdc = FILE:/var/log/krb5kdc.log -admin_server = FILE:/var/log/kadmind.log - -[libdefaults] - dns_canonicalize_hostname = false - dns_lookup_realm = false - ticket_lifetime = 24h - renew_lifetime = 7d - forwardable = true - rdns = false - default_realm = EXAMPLE.COM - -[realms] - EXAMPLE.COM = { - kdc = kdc - admin_server = kdc - max_renewable_life = 7d - } - -[domain_realm] - .example.com = EXAMPLE.COM - example.com = EXAMPLE.COM - diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml index 32059140ce9..6e3ff6cfbc9 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml @@ -18,39 +18,14 @@ x-new-config: &new-config image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} - dns_search: . env_file: - docker-config volumes: - ../..:/opt/hadoop - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf services: - kdc: - image: ${OZONE_TESTKRB5_IMAGE} - hostname: kdc - dns_search: . - volumes: - - ../..:/opt/hadoop - - ../_keytabs:/etc/security/keytabs - command: [ "krb5kdc","-n" ] - kms: - image: apache/hadoop:${HADOOP_VERSION} - hostname: kms - dns_search: . - ports: - - 9600:9600 - env_file: - - ./docker-config - environment: - HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop - volumes: - - ../../libexec/transformation.py:/opt/transformation.py - command: [ "hadoop", "kms" ] datanode: <<: *new-config - hostname: dn ports: - 19864 - 9882 @@ -59,17 +34,15 @@ services: command: ["ozone","datanode"] om: <<: *new-config - hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false + OZONE_OPTS: ports: - 9874:9874 - 9862:9862 command: ["ozone","om"] recon: <<: *new-config - hostname: recon ports: - 9888:9888 environment: @@ -77,7 +50,6 @@ services: command: ["ozone","recon"] s3g: <<: *new-config - hostname: s3g environment: OZONE_OPTS: ports: @@ -85,12 +57,9 @@ services: command: ["ozone","s3g"] scm: <<: *new-config - hostname: scm ports: - 9876:9876 - - 9860:9860 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" OZONE_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml index d1b6e56a084..c603bb51df3 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml @@ -17,40 +17,15 @@ # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) x-old-config: &old-config - image: apache/ozone:${OZONE_VERSION}-rocky - dns_search: . + image: apache/ozone:${OZONE_VERSION} env_file: - docker-config volumes: - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf services: - kdc: - image: ${OZONE_TESTKRB5_IMAGE} - hostname: kdc - dns_search: . - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - command: [ "krb5kdc","-n" ] - kms: - image: apache/hadoop:${HADOOP_VERSION} - hostname: kms - dns_search: . 
- ports: - - 9600:9600 - env_file: - - ./docker-config - environment: - HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop - volumes: - - ../../libexec/transformation.py:/opt/transformation.py - command: [ "hadoop", "kms" ] datanode: <<: *old-config - hostname: dn ports: - 19864 - 9882 @@ -59,10 +34,8 @@ services: command: ["ozone","datanode"] om: <<: *old-config - hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false HADOOP_OPTS: ports: - 9874:9874 @@ -70,7 +43,6 @@ services: command: ["ozone","om"] recon: <<: *old-config - hostname: recon ports: - 9888:9888 environment: @@ -78,7 +50,6 @@ services: command: ["ozone","recon"] s3g: <<: *old-config - hostname: s3g environment: HADOOP_OPTS: ports: @@ -86,11 +57,9 @@ services: command: ["ozone","s3g"] scm: <<: *old-config - hostname: scm ports: - 9876:9876 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" HADOOP_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh index 8774cf2f632..695d8bf06ab 100755 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh @@ -22,15 +22,11 @@ export COMPOSE_DIR basename=$(basename ${COMPOSE_DIR}) current_version="${ozone.version}" -# TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters -old_versions="1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml +old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh source "${COMPOSE_DIR}/../testlib.sh" -export SECURITY_ENABLED=true -: ${OZONE_BUCKET_KEY_NAME:=key1} - old_client() { OZONE_DIR=/opt/ozone container=${client} @@ -44,40 +40,24 @@ new_client() { "$@" } -_kinit() { - execute_command_in_container ${container} kinit -k -t /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM -} - _init() { - _kinit execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup } _write() { - _kinit execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" -v SUFFIX:${client_version} compatibility/write.robot } _read() { - _kinit local data_version="$1" execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" -v SUFFIX:${data_version} compatibility/read.robot } -test_bucket_encryption() { - _kinit - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}" -v SUFFIX:${client_version} security/bucket-encryption.robot -} - test_cross_compatibility() { echo "Starting cluster with COMPOSE_FILE=${COMPOSE_FILE}" OZONE_KEEP_RESULTS=true start_docker_env - execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} - new_client test_bucket_encryption - - container=scm _kinit execute_command_in_container scm ozone freon ockg -n1 -t1 -p warmup new_client _write new_client _read ${current_version} @@ -85,8 +65,6 @@ test_cross_compatibility() { for client_version in "$@"; do client="old_client_${client_version//./_}" - old_client test_bucket_encryption - old_client _write old_client _read ${client_version} @@ -101,8 +79,7 @@ test_ec_cross_compatibility() { echo "Running Erasure Coded storage backward compatibility 
tests." # local cluster_versions_with_ec="1.3.0 1.4.0 ${current_version}" local cluster_versions_with_ec="${current_version}" # until HDDS-11334 - # TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters - local non_ec_client_versions="1.1.0 1.2.1" + local non_ec_client_versions="1.0.0 1.1.0 1.2.1" for cluster_version in ${cluster_versions_with_ec}; do export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version} @@ -125,14 +102,12 @@ test_ec_cross_compatibility() { local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]') OZONE_DIR=/opt/hadoop - new_client _kinit execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot OZONE_DIR=/opt/ozone for client_version in ${non_ec_client_versions}; do client="old_client_${client_version//./_}" unset OUTPUT_PATH - container="${client}" _kinit execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot done diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 9cb9202be0b..c28483c6735 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -407,7 +407,6 @@ Apache License 2.0 org.apache.ratis:ratis-proto org.apache.ratis:ratis-server org.apache.ratis:ratis-server-api - org.apache.ratis:ratis-shell org.apache.ratis:ratis-thirdparty-misc org.apache.ratis:ratis-tools org.apache.thrift:libthrift diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 1e07ec1a2c2..042c9380e4a 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -252,7 +252,6 @@ share/ozone/lib/ratis-netty.jar share/ozone/lib/ratis-proto.jar share/ozone/lib/ratis-server-api.jar share/ozone/lib/ratis-server.jar -share/ozone/lib/ratis-shell.jar share/ozone/lib/ratis-thirdparty-misc.jar share/ozone/lib/ratis-tools.jar share/ozone/lib/re2j.jar diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot index c3caec2ae91..dc862d59c1a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot @@ -25,8 +25,3 @@ Picks up command line options ${processes} = List All Processes Should Contain ${processes} %{HDFS_OM_OPTS} Should Contain ${processes} %{HADOOP_OPTS} - -Rejects Atomic Key Rewrite - Execute ozone freon ockg -n1 -t1 -p rewrite - ${output} = Execute and check rc ozone sh key rewrite -t EC -r rs-3-2-1024k /vol1/bucket1/rewrite/0 255 - Should Contain ${output} Feature disabled: ATOMIC_REWRITE_KEY diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index 57715cda95f..511679c56f4 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -30,10 +30,6 @@ Key Can Be Read Dir Can Be Listed Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX} -Dir Can Be Listed Using Shell - ${result} = Execute ozone sh key list /vol1/bucket1 - Should Contain ${result} key-${SUFFIX} - File Can Be Get Execute 
ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/ Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX} diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot deleted file mode 100644 index e006e154af1..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test ozone debug ldb CLI -Library OperatingSystem -Resource ../lib/os.robot -Test Timeout 5 minute -Suite Setup Write keys - -*** Variables *** -${PREFIX} ${EMPTY} -${VOLUME} cli-debug-volume${PREFIX} -${BUCKET} cli-debug-bucket -${DEBUGKEY} debugKey -${TESTFILE} testfile - -*** Keywords *** -Write keys - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab - Execute ozone sh volume create ${VOLUME} - Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE - Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE} bs=100000 count=15 - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3 - -*** Test Cases *** -Test ozone debug ldb ls - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db ls - Should contain ${output} keyTable - -Test ozone debug ldb scan - # test count option - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --count - Should Not Be Equal ${output} 0 - # test valid json for scan command - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq -r '.' 
- Should contain ${output} keyName - Should contain ${output} testfile1 - Should contain ${output} testfile2 - Should contain ${output} testfile3 - # test startkey option - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --startkey="/cli-debug-volume/cli-debug-bucket/testfile2" - Should not contain ${output} testfile1 - Should contain ${output} testfile2 - Should contain ${output} testfile3 - # test endkey option - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --endkey="/cli-debug-volume/cli-debug-bucket/testfile2" - Should contain ${output} testfile1 - Should contain ${output} testfile2 - Should not contain ${output} testfile3 - # test fields option - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --fields="volumeName,bucketName,keyName" - Should contain ${output} volumeName - Should contain ${output} bucketName - Should contain ${output} keyName - Should not contain ${output} objectID - Should not contain ${output} dataSize - Should not contain ${output} keyLocationVersions - # test filter option with one filter - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2" - Should not contain ${output} testfile1 - Should contain ${output} testfile2 - Should not contain ${output} testfile3 - # test filter option with one multi-level filter - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest" - Should not contain ${output} testfile1 - Should not contain ${output} testfile2 - Should contain ${output} testfile3 - # test filter option with multiple filter - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile3,acls.name:equals:systest" - Should not contain ${output} testfile1 - Should not contain ${output} testfile2 - Should contain ${output} testfile3 - # test filter option with no records match both filters - ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest,keyName:equals:testfile2" - Should not contain ${output} testfile1 - Should not contain ${output} testfile2 - Should not contain ${output} testfile3 diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot index 691769dbd72..f867ee99f64 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot @@ -17,13 +17,11 @@ Documentation Test lease recovery of ozone filesystem Library OperatingSystem Resource ../lib/os.robot -Resource ../lib/fs.robot Resource ozone-debug.robot Test Timeout 5 minute Suite Setup Create volume bucket and put key *** Variables *** -${OM_SERVICE_ID} %{OM_SERVICE_ID} ${VOLUME} lease-recovery-volume ${BUCKET} lease-recovery-bucket ${TESTFILE} testfile22 @@ -37,17 +35,13 @@ Create volume bucket and put key *** Test Cases *** Test ozone debug recover for o3fs - ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${TESTFILE} - ${result} = Execute Lease recovery cli ${o3fs_path} - Should Contain ${result} Lease recovery SUCCEEDED - ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} randomfile - ${result} = Execute Lease recovery cli ${o3fs_path} - Should Contain ${result} not found + ${result} = Execute Lease recovery cli 
o3fs://${BUCKET}.${VOLUME}.om/${TESTFILE} + Should Contain ${result} Lease recovery SUCCEEDED + ${result} = Execute Lease recovery cli o3fs://${BUCKET}.${VOLUME}.om/randomfile + Should Contain ${result} not found Test ozone debug recover for ofs - ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${TESTFILE} - ${result} = Execute Lease recovery cli ${ofs_path} - Should Contain ${result} Lease recovery SUCCEEDED - ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} randomfile - ${result} = Execute Lease recovery cli ${ofs_path} - Should Contain ${result} not found + ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/${TESTFILE} + Should Contain ${result} Lease recovery SUCCEEDED + ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/randomfile + Should Contain ${result} not found diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot b/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot deleted file mode 100644 index c8462124427..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test HSync via freon CLI. -Library OperatingSystem -Library String -Library BuiltIn -Resource ../ozone-lib/freon.robot -Resource ../lib/fs.robot -Test Timeout 10 minutes -Suite Setup Create volume and bucket - -*** Variables *** -${OM_SERVICE_ID} %{OM_SERVICE_ID} -${VOLUME} hsync-volume -${BUCKET} hsync-bucket - -*** Keywords *** -Create volume and bucket - Execute ozone sh volume create /${volume} - Execute ozone sh bucket create /${volume}/${bucket} - -*** Test Cases *** -Generate key for o3fs by HSYNC - ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} - Freon DFSG sync=HSYNC path=${path} - -Generate key for o3fs by HFLUSH - ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} - Freon DFSG sync=HFLUSH path=${path} - -Generate key for ofs by HSYNC - ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} - Freon DFSG sync=HSYNC path=${path} - -Generate key for ofs by HFLUSH - ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} - Freon DFSG sync=HFLUSH path=${path} diff --git a/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot b/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot deleted file mode 100644 index 1250ad1344e..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test HSync during upgrade -Library OperatingSystem -Library String -Library BuiltIn -Resource ../commonlib.robot -Resource ../lib/fs.robot -Resource ../debug/ozone-debug.robot -Default Tags pre-finalized-hsync-tests -Suite Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab - -*** Variables *** -${OM_SERVICE_ID} %{OM_SERVICE_ID} -${VOLUME} upgrade-hsync-volume -${BUCKET} upgrade-hsync-bucket -${KEY} upgrade-hsync-key - -*** Keywords *** -Create volume bucket and put key - Execute ozone sh volume create /${volume} - Execute ozone sh bucket create /${volume}/${bucket} - Execute ozone sh key put /${volume}/${bucket}/${key} /etc/hosts - -Freon DFSG - [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240 - ${result} = Execute and checkrc ozone freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} 255 - Should contain ${result} NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION - -*** Test Cases *** -Test HSync lease recover prior to finalization - Create volume bucket and put key - ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${KEY} - ${result} = Execute and checkrc ozone debug recover --path=${o3fs_path} 255 - Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7 - ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${KEY} - ${result} = Execute and checkrc ozone debug recover --path=${ofs_path} 255 - Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7 - -Generate key for o3fs by HSYNC prior to finalization - ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} - Freon DFSG sync=HSYNC path=${path} - -Generate key for o3fs by HFLUSH prior to finalization - ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} - Freon DFSG sync=HFLUSH path=${path} - -Generate key for ofs by HSYNC prior to finalization - ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} - Freon DFSG sync=HSYNC path=${path} - -Generate key for ofs by HFLUSH prior to finalization - ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} - Freon DFSG sync=HFLUSH path=${path} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot index b813c9ed411..8d10cc81e90 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot @@ -67,9 +67,3 @@ Freon OMBR [arguments] ${prefix}=ombg ${n}=1 ${threads}=1 ${args}=${EMPTY} ${result} = Execute ozone freon ombr ${OM_HA_PARAM} -t ${threads} -n${n} -p ${prefix} ${args} Should contain ${result} Successful executions: ${n} - -Freon DFSG - [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${threads}=1 ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240 ${args}=${EMPTY} - ${result} = Execute ozone 
freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} -t ${threads} ${args} - Should contain ${result} Successful executions: ${n} - diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot index 651cda016f2..22805efcb1b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot @@ -56,11 +56,3 @@ Compare Key With Local File with Different File Compare Key With Local File if File Does Not Exist ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file Should Be Equal ${matches} ${FALSE} - -Rejects Put Key With Zero Expected Generation - ${output} = Execute and checkrc ozone sh key put --expectedGeneration 0 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 - Should Contain ${output} must be positive - -Rejects Put Key With Negative Expected Generation - ${output} = Execute and checkrc ozone sh key put --expectedGeneration -1 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 - Should Contain ${output} must be positive diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index d62a217e606..dd06d55f75f 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -107,11 +107,6 @@ Test Multipart Upload Complete ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' Should Be Equal As Strings ${eTag2} ${part2Md5Sum} -#complete multipart upload without any parts - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 - Should contain ${result} InvalidRequest - Should contain ${result} must specify at least one part - #complete multipart upload ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' Should contain ${result} ${BUCKET} diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 0d005b3bd78..22ceed9ed3c 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -61,7 +61,6 @@ function ozone_usage ozone_add_subcommand "debug" client "Ozone debug tool" ozone_add_subcommand "repair" client "Ozone repair tool" ozone_add_subcommand "checknative" client "checks if native libraries are loaded" - ozone_add_subcommand "ratis" client "Ozone ratis tool" ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false } @@ -232,10 +231,6 @@ function ozonecmd_case OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; - ratis) - OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneRatis - OZONE_RUN_ARTIFACT_NAME="ozone-tools" - ;; *) OZONE_CLASSNAME="${subcmd}" if ! 
ozone_validate_classname "${OZONE_CLASSNAME}"; then diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index 69242d2b1f0..78b67f99f1e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -187,8 +187,6 @@ void init() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); if (!bucketLayout.equals(FILE_SYSTEM_OPTIMIZED)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index cfc9029019a..32a785a95a9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -237,8 +237,6 @@ void initClusterAndEnv() throws IOException, InterruptedException, TimeoutExcept conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); if (bucketLayout == BucketLayout.FILE_SYSTEM_OPTIMIZED) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 8d161dedeb3..0abfb133654 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -18,11 +18,7 @@ package org.apache.hadoop.fs.ozone; -import java.util.List; -import java.util.Random; import java.util.concurrent.CompletableFuture; - -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -36,16 +32,10 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMMetadataManager; -import 
org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; @@ -58,16 +48,12 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,8 +64,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -88,12 +72,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; /** * Directory deletion service test cases. @@ -119,7 +97,6 @@ public static void init() throws Exception { conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) @@ -483,123 +460,6 @@ public void testDeleteFilesAndSubFiles() throws Exception { assertEquals(prevDeletedKeyCount + 5, currentDeletedKeyCount); } - private void createFileKey(OzoneBucket bucket, String key) - throws Exception { - byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); - OzoneOutputStream fileKey = bucket.createKey(key, value.length); - fileKey.write(value); - fileKey.close(); - } - - /* - * Create key d1/k1 - * Create snap1 - * Rename dir1 to dir2 - * Delete dir2 - * Wait for KeyDeletingService to start processing deleted key k2 - * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed - * Resume KeyDeletingService thread. - * Read d1 from snap1. 
- */ - @Test - public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() - throws Exception { - OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager(); - Table snapshotInfoTable = omMetadataManager.getSnapshotInfoTable(); - Table deletedDirTable = omMetadataManager.getDeletedDirTable(); - Table renameTable = omMetadataManager.getSnapshotRenamedTable(); - cluster.getOzoneManager().getKeyManager().getSnapshotDeletingService().shutdown(); - DirectoryDeletingService dirDeletingService = cluster.getOzoneManager().getKeyManager().getDirDeletingService(); - // Suspend KeyDeletingService - dirDeletingService.suspend(); - GenericTestUtils.waitFor(() -> !dirDeletingService.isRunningOnAOS(), 1000, 10000); - Random random = new Random(); - final String testVolumeName = "volume" + random.nextInt(); - final String testBucketName = "bucket" + random.nextInt(); - // Create Volume and Buckets - ObjectStore store = client.getObjectStore(); - store.createVolume(testVolumeName); - OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName, - BucketArgs.newBuilder().setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build()); - OzoneBucket bucket = volume.getBucket(testBucketName); - - OzoneManager ozoneManager = Mockito.spy(cluster.getOzoneManager()); - OmSnapshotManager omSnapshotManager = Mockito.spy(ozoneManager.getOmSnapshotManager()); - when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> omSnapshotManager); - DirectoryDeletingService service = Mockito.spy(new DirectoryDeletingService(1000, TimeUnit.MILLISECONDS, 1000, - ozoneManager, - cluster.getConf())); - service.shutdown(); - final int initialSnapshotCount = - (int) cluster.getOzoneManager().getMetadataManager().countRowsInTable(snapshotInfoTable); - final int initialDeletedCount = (int) omMetadataManager.countRowsInTable(deletedDirTable); - final int initialRenameCount = (int) omMetadataManager.countRowsInTable(renameTable); - String snap1 = "snap1"; - String snap2 = "snap2"; - createFileKey(bucket, "dir1/key1"); - store.createSnapshot(testVolumeName, testBucketName, "snap1"); - bucket.renameKey("dir1", "dir2"); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(testVolumeName) - .setBucketName(testBucketName) - .setKeyName("dir2").build(); - long objectId = store.getClientProxy().getOzoneManagerClient().getKeyInfo(omKeyArgs, false) - .getKeyInfo().getObjectID(); - long volumeId = omMetadataManager.getVolumeId(testVolumeName); - long bucketId = omMetadataManager.getBucketId(testVolumeName, testBucketName); - String deletePathKey = omMetadataManager.getOzoneDeletePathKey(objectId, - omMetadataManager.getOzonePathKey(volumeId, - bucketId, bucketId, "dir2")); - bucket.deleteDirectory("dir2", true); - - - assertTableRowCount(deletedDirTable, initialDeletedCount + 1); - assertTableRowCount(renameTable, initialRenameCount + 1); - Mockito.doAnswer(i -> { - List purgePathRequestList = i.getArgument(5); - for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest : purgePathRequestList) { - Assertions.assertNotEquals(deletePathKey, purgeRequest.getDeletedDir()); - } - return i.callRealMethod(); - }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(), - anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any()); - - Mockito.doAnswer(i -> { - store.createSnapshot(testVolumeName, testBucketName, snap2); - GenericTestUtils.waitFor(() -> { - try { - SnapshotInfo snapshotInfo = 
store.getClientProxy().getOzoneManagerClient() - .getSnapshotInfo(testVolumeName, testBucketName, snap2); - - return OmSnapshotManager.areSnapshotChangesFlushedToDB(cluster.getOzoneManager().getMetadataManager(), - snapshotInfo); - } catch (IOException e) { - throw new RuntimeException(e); - } - }, 1000, 100000); - GenericTestUtils.waitFor(() -> { - try { - return renameTable.get(omMetadataManager.getRenameKey(testVolumeName, testBucketName, objectId)) == null; - } catch (IOException e) { - throw new RuntimeException(e); - } - }, 1000, 10000); - return i.callRealMethod(); - }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(testVolumeName), ArgumentMatchers.eq(testBucketName), - ArgumentMatchers.eq(snap1)); - assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1); - service.runPeriodicalTaskNow(); - service.runPeriodicalTaskNow(); - assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2); - store.deleteSnapshot(testVolumeName, testBucketName, snap2); - service.runPeriodicalTaskNow(); - store.deleteSnapshot(testVolumeName, testBucketName, snap1); - cluster.restartOzoneManager(); - assertTableRowCount(cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(), initialSnapshotCount); - dirDeletingService.resume(); - } - @Test public void testDirDeletedTableCleanUpForSnapshot() throws Exception { Table deletedDirTable = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index c39e24571a8..49b515d53c5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -70,7 +70,6 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.ozone.ClientConfigForTesting; -import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -84,9 +83,7 @@ import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.keyvalue.impl.AbstractTestChunkManager; import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -96,7 +93,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.security.UserGroupInformation; @@ -181,8 +177,6 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); - CONF.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - CONF.setBoolean("ozone.client.hbase.enhancements.allowed", true); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval @@ -349,8 +343,6 @@ 
public void testEmptyHsync() throws Exception { } @Test - // Making this the second test to be run to avoid lingering block files from previous tests - @Order(2) public void testKeyHSyncThenClose() throws Exception { // Check that deletedTable should not have keys with the same block as in // keyTable's when a key is hsync()'ed then close()'d. @@ -366,16 +358,10 @@ public void testKeyHSyncThenClose() throws Exception { String data = "random data"; final Path file = new Path(dir, "file-hsync-then-close"); try (FileSystem fs = FileSystem.get(CONF)) { - String chunkPath; try (FSDataOutputStream outputStream = fs.create(file, true)) { outputStream.write(data.getBytes(UTF_8), 0, data.length()); outputStream.hsync(); - // locate the container chunk path on the first DataNode. - chunkPath = getChunkPathOnDataNode(outputStream); - assertFalse(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath)); } - // After close, the chunk file should be closed. - assertTrue(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath)); } OzoneManager ozoneManager = cluster.getOzoneManager(); @@ -401,22 +387,6 @@ public void testKeyHSyncThenClose() throws Exception { } } - private static String getChunkPathOnDataNode(FSDataOutputStream outputStream) - throws IOException { - String chunkPath; - KeyOutputStream groupOutputStream = - ((OzoneFSOutputStream) outputStream.getWrappedStream()).getWrappedOutputStream().getKeyOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); - chunkPath = dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()). 
- getContainerData().getChunksPath(); - return chunkPath; - } - @ParameterizedTest @ValueSource(booleans = {false, true}) public void testO3fsHSync(boolean incrementalChunkList) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java index 624b5e02c14..917ce57fe7d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java @@ -107,8 +107,6 @@ public void init() throws Exception { conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 6a3a0eb5b67..a4a9bcff470 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -120,8 +120,6 @@ public void init() throws IOException, InterruptedException, final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java index bce96251873..b79c9a870e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java @@ -47,7 +47,6 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.assertj.core.api.Assumptions.assumeThat; @@ -94,8 +93,6 @@ protected static OzoneConfiguration createBaseConfiguration() { conf.addResource(CONTRACT_XML); - conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); return conf; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java 
index 4a9efceeb7b..75d860d951b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -61,15 +62,16 @@ public void init() * * @throws Exception */ + @Flaky("HDDS-11359") @Test public void testPipelineInfo() throws Exception { ObjectName bean = new ObjectName( "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo"); + Map pipelineStateCount = cluster + .getStorageContainerManager().getPipelineManager().getPipelineInfo(); GenericTestUtils.waitFor(() -> { try { - Map pipelineStateCount = cluster - .getStorageContainerManager().getPipelineManager().getPipelineInfo(); final TabularData data = (TabularData) mbs.getAttribute( bean, "PipelineInfo"); for (Map.Entry entry : pipelineStateCount.entrySet()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 6f79839cd02..c274d8fea30 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -61,7 +61,6 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.SecretKeyTestClient; -import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; import org.apache.hadoop.ozone.client.io.InsufficientLocationsException; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -84,7 +83,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -101,7 +99,6 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.UUID; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -120,7 +117,6 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.params.provider.Arguments.arguments; /** * This class tests container commands on EC containers. 
@@ -617,33 +613,30 @@ private static byte[] getBytesWith(int singleDigitNumber, int total) { @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWith(List missingIndexes, boolean triggerRetry) + void testECReconstructionCoordinatorWith(List missingIndexes) throws Exception { - testECReconstructionCoordinator(missingIndexes, 3, triggerRetry); + testECReconstructionCoordinator(missingIndexes, 3); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes, - boolean triggerRetry) throws Exception { - testECReconstructionCoordinator(missingIndexes, 1, triggerRetry); + void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes) + throws Exception { + testECReconstructionCoordinator(missingIndexes, 1); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes, - boolean triggerRetry) throws Exception { - testECReconstructionCoordinator(missingIndexes, 4, triggerRetry); + void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes) + throws Exception { + testECReconstructionCoordinator(missingIndexes, 4); } - static Stream recoverableMissingIndexes() { - Stream args = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), true)); - Stream args1 = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), false)); - Stream args2 = Stream.of(arguments(ImmutableList.of(2, 3), true), - arguments(ImmutableList.of(2, 4), true), arguments(ImmutableList.of(3, 5), true)); - Stream args3 = Stream.of(arguments(ImmutableList.of(2, 3), false), - arguments(ImmutableList.of(2, 4), false), arguments(ImmutableList.of(3, 5), false)); - return Stream.concat(Stream.concat(args, args1), Stream.concat(args2, args3)); + static Stream> recoverableMissingIndexes() { + return Stream + .concat(IntStream.rangeClosed(1, 5).mapToObj(ImmutableList::of), Stream + .of(ImmutableList.of(2, 3), ImmutableList.of(2, 4), + ImmutableList.of(3, 5), ImmutableList.of(4, 5))); } /** @@ -654,7 +647,7 @@ static Stream recoverableMissingIndexes() { public void testECReconstructionCoordinatorWithMissingIndexes135() { InsufficientLocationsException exception = assertThrows(InsufficientLocationsException.class, () -> { - testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3, false); + testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3); }); String expectedMessage = @@ -665,7 +658,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() { } private void testECReconstructionCoordinator(List missingIndexes, - int numInputChunks, boolean triggerRetry) throws Exception { + int numInputChunks) throws Exception { ObjectStore objectStore = rpcClient.getObjectStore(); String keyString = UUID.randomUUID().toString(); String volumeName = UUID.randomUUID().toString(); @@ -674,7 +667,7 @@ private void testECReconstructionCoordinator(List missingIndexes, objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, numInputChunks, triggerRetry); + createKeyAndWriteData(keyString, bucket, numInputChunks); try ( XceiverClientManager xceiverClientManager = @@ -786,7 +779,7 @@ private void testECReconstructionCoordinator(List missingIndexes, .getReplicationConfig(), cToken); 
assertEquals(blockDataArrList.get(i).length, reconstructedBlockData.length); - checkBlockDataWithRetry(blockDataArrList.get(i), reconstructedBlockData, triggerRetry); + checkBlockData(blockDataArrList.get(i), reconstructedBlockData); XceiverClientSpi client = xceiverClientManager.acquireClient( newTargetPipeline); try { @@ -807,7 +800,7 @@ private void testECReconstructionCoordinator(List missingIndexes, } private void createKeyAndWriteData(String keyString, OzoneBucket bucket, - int numChunks, boolean triggerRetry) throws IOException { + int numChunks) throws IOException { for (int i = 0; i < numChunks; i++) { inputChunks[i] = getBytesWith(i + 1, EC_CHUNK_SIZE); } @@ -816,48 +809,11 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket, new HashMap<>())) { assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); for (int i = 0; i < numChunks; i++) { - // We generally wait until the data is written to the last chunk - // before attempting to trigger CloseContainer. - // We use an asynchronous approach for this trigger, - // aiming to ensure that closing the container does not interfere with the write operation. - // However, this process often needs to be executed multiple times before it takes effect. - if (i == numChunks - 1 && triggerRetry) { - triggerRetryByCloseContainer(out); - } out.write(inputChunks[i]); } } } - private void triggerRetryByCloseContainer(OzoneOutputStream out) { - CompletableFuture.runAsync(() -> { - BlockOutputStreamEntry blockOutputStreamEntry = out.getKeyOutputStream().getStreamEntries().get(0); - BlockID entryBlockID = blockOutputStreamEntry.getBlockID(); - long entryContainerID = entryBlockID.getContainerID(); - Pipeline entryPipeline = blockOutputStreamEntry.getPipeline(); - Map replicaIndexes = entryPipeline.getReplicaIndexes(); - try { - for (Map.Entry entry : replicaIndexes.entrySet()) { - DatanodeDetails key = entry.getKey(); - Integer value = entry.getValue(); - XceiverClientManager xceiverClientManager = new XceiverClientManager(config); - Token cToken = containerTokenGenerator - .generateToken(ANY_USER, ContainerID.valueOf(entryContainerID)); - XceiverClientSpi client = xceiverClientManager.acquireClient( - createSingleNodePipeline(entryPipeline, key, value)); - try { - ContainerProtocolCalls.closeContainer(client, entryContainerID, cToken.encodeToUrlString()); - } finally { - xceiverClientManager.releaseClient(client, false); - } - break; - } - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - @Test public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() throws Exception { @@ -870,7 +826,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, 3, false); + createKeyAndWriteData(keyString, bucket, 3); OzoneKeyDetails key = bucket.getKey(keyString); long conID = key.getOzoneKeyLocations().get(0).getContainerID(); @@ -944,25 +900,6 @@ private void closeContainer(long conID) HddsProtos.LifeCycleEvent.CLOSE); } - private void checkBlockDataWithRetry( - org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, - org.apache.hadoop.ozone.container.common.helpers.BlockData[] - reconstructedBlockData, boolean triggerRetry) { - if (triggerRetry) { - for (int i = 0; i < reconstructedBlockData.length; i++) { - 
assertEquals(blockData[i].getBlockID(), reconstructedBlockData[i].getBlockID()); - List oldBlockDataChunks = blockData[i].getChunks(); - List newBlockDataChunks = reconstructedBlockData[i].getChunks(); - for (int j = 0; j < newBlockDataChunks.size(); j++) { - ContainerProtos.ChunkInfo chunkInfo = oldBlockDataChunks.get(j); - assertEquals(chunkInfo, newBlockDataChunks.get(j)); - } - } - return; - } - checkBlockData(blockData, reconstructedBlockData); - } - private void checkBlockData( org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, org.apache.hadoop.ozone.container.common.helpers.BlockData[] @@ -1030,7 +967,8 @@ public static void prepareData(int[][] ranges) throws Exception { out.write(values[i]); } } - +// List containerIDs = +// new ArrayList<>(scm.getContainerManager().getContainerIDs()); List containerIDs = scm.getContainerManager().getContainers() .stream() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 8a219514d34..cc1f93fbc1e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -93,7 +93,6 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY, OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED, - OMConfigKeys.OZONE_OM_FEATURES_DISABLED, OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE, OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY, OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 3063e2587e4..766ed09bccd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -126,8 +126,6 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { zeroCopyEnabled); conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index eb3709c9a85..8810bab5190 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream; import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import 
org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -49,6 +48,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -96,9 +96,6 @@ static MiniOzoneCluster createCluster() throws IOException, conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 3); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); - DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -665,6 +662,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11325") void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 5e5461634c0..f823add57bd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -93,6 +93,7 @@ private static Stream clientParameters() { @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11325") void testContainerClose(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -385,8 +386,7 @@ private void testWriteMoreThanMaxFlushSize(OzoneClient client) assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertThat(blockOutputStream.getBufferPool().getSize()) - .isLessThanOrEqualTo(4); + assertEquals(4, blockOutputStream.getBufferPool().getSize()); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(400, blockOutputStream.getTotalDataFlushedLength()); @@ -442,8 +442,7 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertThat(blockOutputStream.getBufferPool().getSize()) - .isLessThanOrEqualTo(2); + assertEquals(2, blockOutputStream.getBufferPool().getSize()); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); @@ -456,8 +455,7 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertThat(blockOutputStream.getBufferPool().getSize()) - 
.isLessThanOrEqualTo(2); + assertEquals(2, blockOutputStream.getBufferPool().getSize()); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); @@ -508,10 +506,9 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) keyOutputStream.getStreamEntries().get(0).getOutputStream()); // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have up to 4 buffers allocated worth of chunk size + // buffer pool will have 4 buffers allocated worth of chunk size - assertThat(blockOutputStream.getBufferPool().getSize()) - .isLessThanOrEqualTo(4); + assertEquals(4, blockOutputStream.getBufferPool().getSize()); // writtenDataLength as well flushedDataLength will be updated here assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); @@ -534,8 +531,7 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertThat(blockOutputStream.getBufferPool().getSize()) - .isLessThanOrEqualTo(4); + assertEquals(4, blockOutputStream.getBufferPool().getSize()); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index b2766599ae4..958a37380cf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -121,8 +121,6 @@ public static void init() throws Exception { // constructed. 
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_OBJECT_STORE); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, keyProviderUri); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index a4327a49bfa..7af0b5f9aa1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -170,18 +170,6 @@ private static Stream scanTestCases() { Named.of("Invalid EndKey key9", Arrays.asList("--endkey", "key9")), Named.of("Expect key1-key5", Pair.of("key1", "key6")) ), - Arguments.of( - Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), - Named.of("Default", Pair.of(0, "")), - Named.of("Filter key3", Arrays.asList("--filter", "keyName:equals:key3")), - Named.of("Expect key3", Pair.of("key3", "key4")) - ), - Arguments.of( - Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), - Named.of("Default", Pair.of(0, "")), - Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")), - Named.of("Expect key1-key3", null) - ), Arguments.of( Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)), Named.of("Default", Pair.of(0, "")), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java index 29f91821ebd..c24cf748ddb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java @@ -70,8 +70,6 @@ public class TestLeaseRecoverer { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 0481ee4a867..4619af1baa2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -338,6 +338,7 @@ public void testKeyOps() throws Exception { long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics); long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); + long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics); long initialNumKeys = getLongCounter("NumKeys", omMetrics); long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); @@ -345,6 +346,7 @@ public void testKeyOps() throws Exception { long 
initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics); long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics); + long initialNumTrashKeyListFails = getLongCounter("NumTrashKeyListFails", omMetrics); long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics); long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); @@ -354,15 +356,16 @@ public void testKeyOps() throws Exception { TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - doKeyOps(keyArgs); // This will perform 7 different operations on the key + doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 7, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); @@ -406,6 +409,8 @@ public void testKeyOps() throws Exception { doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); doThrow(exception).when(mockKm).listKeys( any(), any(), any(), any(), anyInt()); + doThrow(exception).when(mockKm).listTrash( + any(), any(), any(), any(), anyInt()); OmMetadataReader omMetadataReader = (OmMetadataReader) ozoneManager.getOmMetadataReader().get(); HddsWhiteboxTestUtils.setInternalState( @@ -421,17 +426,19 @@ public void testKeyOps() throws Exception { doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 28, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics)); assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics)); assertEquals(initialNumKeyLookupFails + 1, getLongCounter("NumKeyLookupFails", omMetrics)); assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics)); assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics)); + assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics)); assertEquals(initialNumInitiateMultipartUploadFails + 
1, getLongCounter( "NumInitiateMultipartUploadFails", omMetrics)); assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); @@ -836,6 +843,12 @@ private void doKeyOps(OmKeyArgs keyArgs) { } catch (IOException ignored) { } + try { + ozoneManager.listTrash(keyArgs.getVolumeName(), + keyArgs.getBucketName(), null, null, 0); + } catch (IOException ignored) { + } + try { writeClient.deleteKey(keyArgs); } catch (IOException ignored) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index a173bd9222e..abc21ed4351 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -80,7 +80,6 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -234,7 +233,7 @@ public void setUp() throws IOException { ozoneManager.getMetadataManager().getMetaTable().put( OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY, String.valueOf(v)); return null; - }).when(omRatisServer).submitRequest(any(), any(), anyLong()); + }).when(omRatisServer).submitRequest(any(), any()); } catch (ServiceException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 9a6bca29b88..c123675565a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -204,7 +204,11 @@ private void init() throws Exception { conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); - conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, disableNativeDiff); + conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, + disableNativeDiff); + conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); + conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); + conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); @@ -1477,8 +1481,10 @@ public void testSnapDiffCancel() throws Exception { String toSnapshotTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, toSnapName); - UUID fromSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshotTableKey).getSnapshotId(); - UUID toSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, toSnapshotTableKey).getSnapshotId(); + UUID fromSnapshotID = ozoneManager.getOmSnapshotManager() + .getSnapshotInfo(fromSnapshotTableKey).getSnapshotId(); + UUID toSnapshotID = 
ozoneManager.getOmSnapshotManager() + .getSnapshotInfo(toSnapshotTableKey).getSnapshotId(); // Construct SnapshotDiffJob table key. String snapDiffJobKey = fromSnapshotID + DELIMITER + toSnapshotID; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index f178d00daa7..341b5b78c60 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -21,12 +21,10 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -36,27 +34,20 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.ozone.test.LambdaTestUtils.await; @@ -81,8 +72,6 @@ public class TestOzoneManagerHASnapshot { public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") @@ -276,97 +265,4 @@ private void createFileKey(OzoneBucket bucket, String keyName) fileKey.write(value); } } - - /** - * This is to simulate HDDS-11152 scenario. In which a follower's doubleBuffer is lagging and accumulates purgeKey - * and purgeSnapshot in same batch. 
- */ - @Test - public void testKeyAndSnapshotDeletionService() throws IOException, InterruptedException, TimeoutException { - OzoneManager omLeader = cluster.getOMLeader(); - OzoneManager omFollower; - - if (omLeader != cluster.getOzoneManager(0)) { - omFollower = cluster.getOzoneManager(0); - } else { - omFollower = cluster.getOzoneManager(1); - } - - int numKeys = 5; - List keys = new ArrayList<>(); - for (int i = 0; i < numKeys; i++) { - String keyName = "key-" + RandomStringUtils.randomNumeric(10); - createFileKey(ozoneBucket, keyName); - keys.add(keyName); - } - - // Stop the key deletion service so that deleted keys get trapped in the snapshots. - omLeader.getKeyManager().getDeletingService().suspend(); - // Stop the snapshot deletion service so that deleted keys get trapped in the snapshots. - omLeader.getKeyManager().getSnapshotDeletingService().suspend(); - - // Delete half of the keys - for (int i = 0; i < numKeys / 2; i++) { - ozoneBucket.deleteKey(keys.get(i)); - } - - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(10); - createSnapshot(volumeName, bucketName, snapshotName); - - store.deleteSnapshot(volumeName, bucketName, snapshotName); - - // Pause double buffer on follower node to accumulate all the key purge, snapshot delete and purge transactions. - omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer().stopDaemon(); - - long keyDeleteServiceCount = omLeader.getKeyManager().getDeletingService().getRunCount().get(); - omLeader.getKeyManager().getDeletingService().resume(); - - GenericTestUtils.waitFor( - () -> omLeader.getKeyManager().getDeletingService().getRunCount().get() > keyDeleteServiceCount, - 1000, 60000); - - long snapshotDeleteServiceCount = omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get(); - omLeader.getKeyManager().getSnapshotDeletingService().resume(); - - GenericTestUtils.waitFor( - () -> omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get() > snapshotDeleteServiceCount, - 1000, 60000); - - String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); - checkSnapshotIsPurgedFromDB(omLeader, tableKey); - - // Resume the DoubleBuffer and flush the pending transactions. 
- OzoneManagerDoubleBuffer omDoubleBuffer = - omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer(); - omDoubleBuffer.resume(); - CompletableFuture.supplyAsync(() -> { - omDoubleBuffer.flushTransactions(); - return null; - }); - omDoubleBuffer.awaitFlush(); - checkSnapshotIsPurgedFromDB(omFollower, tableKey); - } - - private void createSnapshot(String volName, String buckName, String snapName) throws IOException { - store.createSnapshot(volName, buckName, snapName); - - String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName); - SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey); - String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo); - File snapshotDir = new File(fileName); - if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { - throw new IOException("Snapshot directory doesn't exist"); - } - } - - private void checkSnapshotIsPurgedFromDB(OzoneManager ozoneManager, String snapshotTableKey) - throws InterruptedException, TimeoutException { - GenericTestUtils.waitFor(() -> { - try { - return ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotTableKey) == null; - } catch (IOException e) { - throw new RuntimeException(e); - } - }, 1000, 60000); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java similarity index 52% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java index 254de072e05..be4ea69095b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.snapshot; -import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -33,26 +32,20 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; -import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.ozone.test.GenericTestUtils; import 
org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.Order; @@ -60,41 +53,25 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.Random; import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.when; /** * Test Snapshot Deleting Service. 
@@ -103,10 +80,10 @@ @Timeout(300) @TestInstance(TestInstance.Lifecycle.PER_CLASS) @TestMethodOrder(OrderAnnotation.class) -public class TestSnapshotDeletingServiceIntegrationTest { +public class TestSnapshotDeletingService { private static final Logger LOG = - LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class); + LoggerFactory.getLogger(TestSnapshotDeletingService.class); private static boolean omRatisEnabled = true; private static final ByteBuffer CONTENT = ByteBuffer.allocate(1024 * 1024 * 16); @@ -131,7 +108,6 @@ public void setup() throws Exception { 1, StorageUnit.MB); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT, 10000, TimeUnit.MILLISECONDS); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500); @@ -171,7 +147,7 @@ public void testSnapshotSplitAndMove() throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); - createSnapshotDataForBucket(bucket1); + createSnapshotDataForBucket1(); assertTableRowCount(snapshotInfoTable, 2); GenericTestUtils.waitFor(() -> snapshotDeletingService @@ -198,7 +174,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { om.getMetadataManager().getSnapshotInfoTable(); runIndividualTest = false; - createSnapshotDataForBucket(bucket1); + createSnapshotDataForBucket1(); BucketArgs bucketArgs = new BucketArgs.Builder() .setBucketLayout(BucketLayout.LEGACY) @@ -449,7 +425,7 @@ public void testSnapshotWithFSO() throws Exception { while (iterator.hasNext()) { Table.KeyValue next = iterator.next(); String activeDBDeletedKey = next.getKey(); - if (activeDBDeletedKey.matches(".*/key1/.*")) { + if (activeDBDeletedKey.matches(".*/key1.*")) { RepeatedOmKeyInfo activeDBDeleted = next.getValue(); OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager(); @@ -478,228 +454,6 @@ public void testSnapshotWithFSO() throws Exception { rcSnap1.close(); } - private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted, - AtomicBoolean dirDeletionStarted) - throws InterruptedException, TimeoutException { - OzoneManager ozoneManager = Mockito.spy(om); - om.getKeyManager().getDirDeletingService().shutdown(); - GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000, - 100000); - DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000, - TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf())); - directoryDeletingService.shutdown(); - GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000, - 100000); - when(ozoneManager.getMetadataManager()).thenAnswer(i -> { - // Wait for SDS to reach DDS wait block before processing any deleted directories. 
- GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000); - dirDeletionStarted.set(true); - return i.callRealMethod(); - }); - return directoryDeletingService; - } - - private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletionWaitStarted, - AtomicBoolean keyDeletionStarted) - throws InterruptedException, TimeoutException, IOException { - OzoneManager ozoneManager = Mockito.spy(om); - om.getKeyManager().getDeletingService().shutdown(); - GenericTestUtils.waitFor(() -> om.getKeyManager().getDeletingService().getThreadCount() == 0, 1000, - 100000); - KeyManager keyManager = Mockito.spy(om.getKeyManager()); - when(ozoneManager.getKeyManager()).thenReturn(keyManager); - KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager, - ozoneManager.getScmClient().getBlockClient(), keyManager, 10000, - 100000, cluster.getConf(), false)); - keyDeletingService.shutdown(); - GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000, - 100000); - when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> { - // wait for SDS to reach the KDS wait block before processing any key. - GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000); - keyDeletionStarted.set(true); - return i.callRealMethod(); - }); - return keyDeletingService; - } - - @SuppressWarnings("checkstyle:parameternumber") - private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingService keyDeletingService, - DirectoryDeletingService directoryDeletingService, - AtomicBoolean snapshotDeletionStarted, - AtomicBoolean keyDeletionWaitStarted, - AtomicBoolean dirDeletionWaitStarted, - AtomicBoolean keyDeletionStarted, - AtomicBoolean dirDeletionStarted, - OzoneBucket testBucket) - throws InterruptedException, TimeoutException, IOException { - OzoneManager ozoneManager = Mockito.spy(om); - om.getKeyManager().getSnapshotDeletingService().shutdown(); - GenericTestUtils.waitFor(() -> om.getKeyManager().getSnapshotDeletingService().getThreadCount() == 0, 1000, - 100000); - KeyManager keyManager = Mockito.spy(om.getKeyManager()); - OmMetadataManagerImpl omMetadataManager = Mockito.spy((OmMetadataManagerImpl)om.getMetadataManager()); - SnapshotChainManager unMockedSnapshotChainManager = - ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(); - SnapshotChainManager snapshotChainManager = Mockito.spy(unMockedSnapshotChainManager); - OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(ozoneManager.getKeyManager()).thenReturn(keyManager); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(omMetadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); - when(keyManager.getDeletingService()).thenReturn(keyDeletingService); - when(keyManager.getDirDeletingService()).thenReturn(directoryDeletingService); - SnapshotDeletingService snapshotDeletingService = Mockito.spy(new SnapshotDeletingService(10000, - 100000, ozoneManager)); - snapshotDeletingService.shutdown(); - GenericTestUtils.waitFor(() -> snapshotDeletingService.getThreadCount() == 0, 1000, - 100000); - when(snapshotChainManager.iterator(anyBoolean())).thenAnswer(i -> { - Iterator itr = (Iterator) i.callRealMethod(); - return Lists.newArrayList(itr).stream().filter(uuid -> { - try { - SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(om, snapshotChainManager, uuid); - return 
snapshotInfo.getBucketName().equals(testBucket.getName()) && - snapshotInfo.getVolumeName().equals(testBucket.getVolumeName()); - } catch (IOException e) { - throw new RuntimeException(e); - } - }).iterator(); - }); - when(snapshotChainManager.getLatestGlobalSnapshotId()) - .thenAnswer(i -> unMockedSnapshotChainManager.getLatestGlobalSnapshotId()); - when(snapshotChainManager.getOldestGlobalSnapshotId()) - .thenAnswer(i -> unMockedSnapshotChainManager.getOldestGlobalSnapshotId()); - doAnswer(i -> { - // KDS wait block reached in SDS. - GenericTestUtils.waitFor(() -> { - return keyDeletingService.isRunningOnAOS(); - }, 1000, 100000); - keyDeletionWaitStarted.set(true); - return i.callRealMethod(); - }).when(snapshotDeletingService).waitForKeyDeletingService(); - doAnswer(i -> { - // DDS wait block reached in SDS. - GenericTestUtils.waitFor(directoryDeletingService::isRunningOnAOS, 1000, 100000); - dirDeletionWaitStarted.set(true); - return i.callRealMethod(); - }).when(snapshotDeletingService).waitForDirDeletingService(); - doAnswer(i -> { - // Assert KDS & DDS is not running when SDS starts moving entries & assert all wait block, KDS processing - // AOS block & DDS AOS block have been executed. - Assertions.assertTrue(keyDeletionWaitStarted.get()); - Assertions.assertTrue(dirDeletionWaitStarted.get()); - Assertions.assertTrue(keyDeletionStarted.get()); - Assertions.assertTrue(dirDeletionStarted.get()); - Assertions.assertFalse(keyDeletingService.isRunningOnAOS()); - Assertions.assertFalse(directoryDeletingService.isRunningOnAOS()); - snapshotDeletionStarted.set(true); - return i.callRealMethod(); - }).when(omSnapshotManager).getSnapshot(anyString(), anyString(), anyString()); - return snapshotDeletingService; - } - - @Test - @Order(4) - public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception { - AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false); - AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false); - AtomicBoolean keyDeletionStarted = new AtomicBoolean(false); - AtomicBoolean dirDeletionStarted = new AtomicBoolean(false); - AtomicBoolean snapshotDeletionStarted = new AtomicBoolean(false); - Random random = new Random(); - String bucketName = "bucket" + random.nextInt(); - BucketArgs bucketArgs = new BucketArgs.Builder() - .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) - .build(); - OzoneBucket testBucket = TestDataUtil.createBucket( - client, VOLUME_NAME, bucketArgs, bucketName); - // mock keyDeletingService - KeyDeletingService keyDeletingService = getMockedKeyDeletingService(keyDeletionWaitStarted, keyDeletionStarted); - - // mock dirDeletingService - DirectoryDeletingService directoryDeletingService = getMockedDirectoryDeletingService(dirDeletionWaitStarted, - dirDeletionStarted); - - // mock snapshotDeletingService. 
- SnapshotDeletingService snapshotDeletingService = getMockedSnapshotDeletingService(keyDeletingService, - directoryDeletingService, snapshotDeletionStarted, keyDeletionWaitStarted, dirDeletionWaitStarted, - keyDeletionStarted, dirDeletionStarted, testBucket); - createSnapshotFSODataForBucket(testBucket); - List> renamesKeyEntries; - List>> deletedKeyEntries; - List> deletedDirEntries; - try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), - testBucket.getName(), testBucket.getName() + "snap2")) { - renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); - deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); - deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), - testBucket.getName(), 1000); - } - Thread keyDeletingThread = new Thread(() -> { - try { - keyDeletingService.runPeriodicalTaskNow(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - Thread directoryDeletingThread = new Thread(() -> { - try { - directoryDeletingService.runPeriodicalTaskNow(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - ExecutorService snapshotDeletingThread = Executors.newFixedThreadPool(1); - Runnable snapshotDeletionRunnable = () -> { - try { - snapshotDeletingService.runPeriodicalTaskNow(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }; - keyDeletingThread.start(); - directoryDeletingThread.start(); - Future future = snapshotDeletingThread.submit(snapshotDeletionRunnable); - GenericTestUtils.waitFor(snapshotDeletionStarted::get, 1000, 30000); - future.get(); - try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), - testBucket.getName(), testBucket.getName() + "snap2")) { - Assertions.assertEquals(Collections.emptyList(), - snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000)); - Assertions.assertEquals(Collections.emptyList(), - snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000)); - Assertions.assertEquals(Collections.emptyList(), - snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), - testBucket.getName(), 1000)); - } - List> aosRenamesKeyEntries = - om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); - List>> aosDeletedKeyEntries = - om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); - List> aosDeletedDirEntries = - om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), - testBucket.getName(), 1000); - renamesKeyEntries.forEach(entry -> Assertions.assertTrue(aosRenamesKeyEntries.contains(entry))); - deletedKeyEntries.forEach(entry -> Assertions.assertTrue(aosDeletedKeyEntries.contains(entry))); - deletedDirEntries.forEach(entry -> Assertions.assertTrue(aosDeletedDirEntries.contains(entry))); - Mockito.reset(snapshotDeletingService); - SnapshotInfo snap2 = SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), - testBucket.getName(), testBucket.getName() + "snap2"); - Assertions.assertEquals(snap2.getSnapshotStatus(), SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); - future = snapshotDeletingThread.submit(snapshotDeletionRunnable); - future.get(); - 
Assertions.assertThrows(IOException.class, () -> SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), - testBucket.getName(), testBucket.getName() + "snap2")); - cluster.restartOzoneManager(); - } - /* Flow ---- @@ -718,7 +472,7 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce create snapshot3 delete snapshot2 */ - private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws Exception { + private void createSnapshotDataForBucket1() throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); Table deletedTable = @@ -728,147 +482,70 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); - TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key1", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); assertTableRowCount(keyTable, 2); // Create Snapshot 1. - client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap1"); + client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1snap1"); assertTableRowCount(snapshotInfoTable, 1); // Overwrite bucket1key0, This is a newer version of the key which should // reclaimed as this is a different version of the key. - TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key2", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1. - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "key1", false); + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1key1", false); // Key 2 is deleted here, which will be reclaimed here as // it is not being referenced by previous snapshot. - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "key2", false); - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "key0", false); + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1key2", false); + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1key0", false); assertTableRowCount(keyTable, 0); // one copy of bucket1key0 should also be reclaimed as it not same // but original deleted key created during overwrite should not be deleted assertTableRowCount(deletedTable, 2); // Create Snapshot 2. - client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap2"); + client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1snap2"); assertTableRowCount(snapshotInfoTable, 2); // Key 2 is removed from the active Db's // deletedTable when Snapshot 2 is taken. 
assertTableRowCount(deletedTable, 0); - TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key3", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket1, "bucket1key4", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "key4", false); + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1key4", false); assertTableRowCount(keyTable, 1); assertTableRowCount(deletedTable, 0); // Create Snapshot 3. - client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap3"); + client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1snap3"); assertTableRowCount(snapshotInfoTable, 3); SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable() - .get(String.format("/%s/%s/%ssnap2", bucket.getVolumeName(), bucket.getName(), bucket.getName())); + .get("/vol1/bucket1/bucket1snap2"); // Delete Snapshot 2. - client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap2"); + client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, + "bucket1snap2"); assertTableRowCount(snapshotInfoTable, 2); - verifySnapshotChain(snapshotInfo, String.format("/%s/%s/%ssnap3", bucket.getVolumeName(), bucket.getName(), - bucket.getName())); - } - - - /* - Flow - ---- - create dir0/key0 - create dir1/key1 - overwrite dir0/key0 - create dir2/key2 - create snap1 - rename dir1/key1 -> dir1/key10 - delete dir1/key10 - delete dir2 - create snap2 - delete snap2 - */ - private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws Exception { - Table snapshotInfoTable = - om.getMetadataManager().getSnapshotInfoTable(); - Table deletedTable = - om.getMetadataManager().getDeletedTable(); - Table deletedDirTable = - om.getMetadataManager().getDeletedDirTable(); - Table keyTable = - om.getMetadataManager().getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - Table dirTable = - om.getMetadataManager().getDirectoryTable(); - Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); - OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) - om.getMetadataManager(); - Map countMap = - metadataManager.listTables().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> { - try { - return (int)metadataManager.countRowsInTable(e.getValue()); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - })); - TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, - ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationFactor.THREE, - ReplicationType.RATIS, CONTENT); - assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2); - assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2); - - // Overwrite bucket1key0, This is a newer version of the key which should - // reclaimed as this is a different version of the key. 
- TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, - ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationFactor.THREE, - ReplicationType.RATIS, CONTENT); - assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3); - assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3); - assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); - // create snap1 - client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap1"); - bucket.renameKey("dir1/" + bucket.getName() + "key1", "dir1/" + bucket.getName() + "key10"); - bucket.renameKey("dir1/", "dir10/"); - assertTableRowCount(renameTable, countMap.get(renameTable.getName()) + 2); - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), - "dir10/" + bucket.getName() + "key10", false); - assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); - // Key 2 is deleted here, which will be reclaimed here as - // it is not being referenced by previous snapshot. - client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), "dir2", true); - assertTableRowCount(deletedDirTable, countMap.get(deletedDirTable.getName()) + 1); - client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap2"); - // Delete Snapshot 2. - client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), - bucket.getName() + "snap2"); - assertTableRowCount(snapshotInfoTable, countMap.get(snapshotInfoTable.getName()) + 2); + verifySnapshotChain(snapshotInfo, "/vol1/bucket1/bucket1snap3"); } - private void verifySnapshotChain(SnapshotInfo deletedSnapshot, String nextSnapshot) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 3be0725a009..03df331087b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -57,7 +57,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -80,7 +79,6 @@ public class TestSnapshotDirectoryCleaningService { public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500); - conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 4476cbc3e38..cba7311b3b4 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -19,7 +19,6 @@ import java.time.Duration; import java.util.List; -import java.util.Map; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -52,7 +51,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -238,8 +236,6 @@ public void testEmptyMissingContainerDownNode() throws Exception { // Bring down the Datanode that had the container replica. cluster.shutdownHddsDatanode(pipeline.getFirstNode()); - // Since we no longer add EMPTY_MISSING containers to the table, we should - // have zero EMPTY_MISSING containers in the DB but their information will be logged. LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -247,19 +243,10 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - - // Check if EMPTY_MISSING containers are not added to the DB and their count is logged - Map> - unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() - .getUnhealthyContainerStateStatsMap(); - - // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state - return allEmptyMissingContainers.size() == 0 && - unhealthyContainerStateStatsMap.get( - ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) - .getOrDefault(CONTAINER_COUNT, 0L) == 1; + return (allEmptyMissingContainers.size() == 1); }); + // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { @@ -285,17 +272,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - - - Map> - unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() - .getUnhealthyContainerStateStatsMap(); - - // Return true if the size of the fetched containers is 0 and the log shows 0 for EMPTY_MISSING state - return allEmptyMissingContainers.size() == 0 && - unhealthyContainerStateStatsMap.get( - ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) - .getOrDefault(CONTAINER_COUNT, 0L) == 0; + return (allEmptyMissingContainers.isEmpty()); }); // Now remove keys from container. This data is used to @@ -306,8 +283,8 @@ public void testEmptyMissingContainerDownNode() throws Exception { reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); } - // Since we no longer add EMPTY_MISSING containers to the table, we should - // have zero EMPTY_MISSING containers in the DB but their information will be logged. 
+ // Check existing container state in UNHEALTHY_CONTAINER table + // will be updated as EMPTY_MISSING LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -315,16 +292,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - - Map> - unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() - .getUnhealthyContainerStateStatsMap(); - - // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state - return allEmptyMissingContainers.size() == 0 && - unhealthyContainerStateStatsMap.get( - ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) - .getOrDefault(CONTAINER_COUNT, 0L) == 1; + return (allEmptyMissingContainers.size() == 1); }); // Now restart the cluster and verify the container is no longer missing. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 9216c909ee4..328fc1ddd8c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -22,13 +22,8 @@ import org.apache.hadoop.ozone.debug.DBScanner; import org.apache.hadoop.ozone.debug.RDBParser; import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.repair.OzoneRepair; import org.apache.hadoop.ozone.repair.RDBRepair; import org.apache.hadoop.ozone.repair.TransactionInfoRepair; -import org.apache.hadoop.ozone.repair.quota.QuotaRepair; -import org.apache.hadoop.ozone.repair.quota.QuotaStatus; -import org.apache.hadoop.ozone.repair.quota.QuotaTrigger; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -43,7 +38,6 @@ import java.util.regex.Pattern; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static java.nio.charset.StandardCharsets.UTF_8; @@ -136,28 +130,4 @@ private String[] parseScanOutput(String output) throws IOException { throw new IllegalStateException("Failed to scan and find raft's highest term and index from TransactionInfo table"); } - @Test - public void testQuotaRepair() throws Exception { - CommandLine cmd = new CommandLine(new OzoneRepair()).addSubcommand(new CommandLine(new QuotaRepair()) - .addSubcommand(new QuotaStatus()).addSubcommand(new QuotaTrigger())); - - String[] args = new String[] {"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - int exitCode = cmd.execute(args); - assertEquals(0, exitCode); - args = new String[] {"quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - exitCode = cmd.execute(args); - assertEquals(0, exitCode); - GenericTestUtils.waitFor(() -> { - out.reset(); - // verify quota trigger is completed having non-zero lastRunFinishedTime - String[] targs = new String[]{"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - cmd.execute(targs); - try { - return !out.toString(DEFAULT_ENCODING).contains("\"lastRunFinishedTime\":\"\""); - } 
catch (Exception ex) { - // do nothing - } - return false; - }, 1000, 10000); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 4c5325edab1..89f068cdedf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -82,7 +82,6 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -164,7 +163,6 @@ public class TestOzoneShellHA { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); startKMS(); startCluster(conf); @@ -592,7 +590,6 @@ public void testAdminCmdListOpenFiles() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); - clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); @@ -712,7 +709,6 @@ public void testAdminCmdListOpenFilesWithDeletedKeys() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); - clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); @@ -829,7 +825,6 @@ public void testAdminCmdListOpenFilesWithOverwrittenKeys() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); - clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java index b1dcbc0576e..a0ad35500ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java @@ -38,8 +38,6 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); startKMS(); startCluster(conf); diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml 
b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 5ea2eb89dfa..779ed2b785c 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -84,7 +84,7 @@ hdds.container.ratis.log.appender.queue.byte-limit - 32MB + 8MB ozone.om.ratis.log.appender.queue.byte-limit diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f71dc44fec5..9e0f729be40 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -97,9 +97,8 @@ enum Type { ListMultipartUploads = 82; - // Not used anymore due to HDDS-11251 - ListTrash = 91; // [deprecated = true] - RecoverTrash = 92; // [deprecated = true] + ListTrash = 91; + RecoverTrash = 92; RevokeS3Secret = 93; @@ -150,9 +149,7 @@ enum Type { RenameSnapshot = 131; ListOpenFiles = 132; QuotaRepair = 133; - GetQuotaRepairStatus = 135; - StartQuotaRepair = 136; - SnapshotMoveTableKeys = 137; + GetServerDefaults = 134; } enum SafeMode { @@ -236,9 +233,8 @@ message OMRequest { optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82; optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83; - // Not used anymore due to HDDS-11251 - optional ListTrashRequest listTrashRequest = 91 [deprecated = true]; - optional RecoverTrashRequest RecoverTrashRequest = 92 [deprecated = true]; + optional ListTrashRequest listTrashRequest = 91; + optional RecoverTrashRequest RecoverTrashRequest = 92; optional RevokeS3SecretRequest RevokeS3SecretRequest = 93; @@ -292,9 +288,7 @@ message OMRequest { optional RenameSnapshotRequest RenameSnapshotRequest = 129; optional ListOpenFilesRequest ListOpenFilesRequest = 130; optional QuotaRepairRequest QuotaRepairRequest = 131; - optional GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; - optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; - optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; + optional ServerDefaultsRequest ServerDefaultsRequest = 132; } message OMResponse { @@ -368,10 +362,8 @@ message OMResponse { optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82; - // Not used anymore due to HDDS-11251 - optional ListTrashResponse listTrashResponse = 91 [deprecated = true]; - optional RecoverTrashResponse RecoverTrashResponse = 92 [deprecated = true]; - + optional ListTrashResponse listTrashResponse = 91; + optional RecoverTrashResponse RecoverTrashResponse = 92; optional PurgePathsResponse purgePathsResponse = 93 [deprecated = true]; optional PurgeDirectoriesResponse purgeDirectoriesResponse = 108; @@ -422,8 +414,7 @@ message OMResponse { optional RenameSnapshotResponse RenameSnapshotResponse = 132; optional ListOpenFilesResponse ListOpenFilesResponse = 133; optional QuotaRepairResponse QuotaRepairResponse = 134; - optional GetQuotaRepairStatusResponse GetQuotaRepairStatusResponse = 136; - optional StartQuotaRepairResponse StartQuotaRepairResponse = 137; + optional ServerDefaultsResponse ServerDefaultsResponse = 135; } enum Status { @@ -557,39 +548,33 @@ enum Status { /** This command acts as a list command for deleted keys that are still present in the deleted table on Ozone Manager. 
- Not used anymore due to HDDS-11251 */ message ListTrashRequest { - // option deprecated = true; - required string volumeName = 1 [deprecated = true]; - required string bucketName = 2 [deprecated = true]; - optional string startKeyName = 3 [deprecated = true]; - optional string keyPrefix = 4 [deprecated = true]; - optional int32 maxKeys = 5 [deprecated = true]; + required string volumeName = 1; + required string bucketName = 2; + optional string startKeyName = 3; + optional string keyPrefix = 4; + optional int32 maxKeys = 5; } message ListTrashResponse { - // option deprecated = true; - repeated RepeatedKeyInfo deletedKeys = 1 [deprecated = true]; + repeated RepeatedKeyInfo deletedKeys = 1; } /** This command acts as a recover command for deleted keys that are still in deleted table on Ozone Manager. - Not used anymore due to HDDS-11251 */ message RecoverTrashRequest { - // option deprecated = true; - required string volumeName = 1 [deprecated = true]; - required string bucketName = 2 [deprecated = true]; - required string keyName = 3 [deprecated = true]; - required string destinationBucket = 4 [deprecated = true]; + required string volumeName = 1; + required string bucketName = 2; + required string keyName = 3; + required string destinationBucket = 4; } message RecoverTrashResponse { - // option deprecated = true; - required bool response = 1 [deprecated = true]; + required bool response = 1; } message VolumeInfo { @@ -885,7 +870,6 @@ message SnapshotInfo { optional uint64 exclusiveReplicatedSize = 18; // note: shared sizes can be calculated from: referenced - exclusive optional bool deepCleanedDeletedDir = 19; - optional bytes lastTransactionInfo = 20; } message SnapshotDiffJobProto { @@ -1378,8 +1362,6 @@ message PurgeKeysRequest { // if set, will purge keys in a snapshot DB instead of active DB optional string snapshotTableKey = 2; repeated SnapshotMoveKeyInfos keysToUpdate = 3; - // previous snapshotID can also be null & this field would be absent in older requests. - optional NullableUUID expectedPreviousSnapshotID = 4; } message PurgeKeysResponse { @@ -1402,12 +1384,6 @@ message PurgePathsResponse { message PurgeDirectoriesRequest { repeated PurgePathRequest deletedPath = 1; optional string snapshotTableKey = 2; - // previous snapshotID can also be null & this field would be absent in older requests. 
- optional NullableUUID expectedPreviousSnapshotID = 3; -} - -message NullableUUID { - optional hadoop.hdds.UUID uuid = 1; } message PurgeDirectoriesResponse { @@ -1637,7 +1613,6 @@ message ServiceInfo { repeated ServicePort servicePorts = 3; optional OMRoleInfo omRole = 4; optional int32 OMVersion = 5 [default = 0]; - optional FsServerDefaultsProto serverDefaults = 6; } message MultipartInfoInitiateRequest { @@ -1989,13 +1964,6 @@ message SnapshotMoveDeletedKeysRequest { repeated string deletedDirsToMove = 5; } -message SnapshotMoveTableKeysRequest { - optional hadoop.hdds.UUID fromSnapshotID = 1; - repeated SnapshotMoveKeyInfos deletedKeys = 2; - repeated SnapshotMoveKeyInfos deletedDirs = 3; - repeated hadoop.hdds.KeyValue renamedKeys = 4; -} - message SnapshotMoveKeyInfos { optional string key = 1; repeated KeyInfo keyInfos = 2; @@ -2237,19 +2205,15 @@ message BucketQuotaCount { message QuotaRepairResponse { } +message ServerDefaultsRequest { +} + message FsServerDefaultsProto { optional string keyProviderUri = 1; } -message GetQuotaRepairStatusRequest { -} -message GetQuotaRepairStatusResponse { - optional string status = 1; -} -message StartQuotaRepairRequest { - repeated string buckets = 1; -} -message StartQuotaRepairResponse { +message ServerDefaultsResponse { + required FsServerDefaultsProto serverDefaults = 1; } message OMLockDetailsProto { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index ae57c18354d..fb34d19a8bb 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -116,22 +116,6 @@ public interface OMMetadataManager extends DBStoreHAManager { */ String getBucketKey(String volume, String bucket); - /** - * Given a volume and bucket, return the corresponding DB key prefix. - * - * @param volume - Volume name - * @param bucket - Bucket name - */ - String getBucketKeyPrefix(String volume, String bucket); - - /** - * Given a volume and bucket, return the corresponding DB key prefix for FSO buckets. - * - * @param volume - Volume name - * @param bucket - Bucket name - */ - String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException; - /** * Given a volume, bucket and a key, return the corresponding DB key. * @@ -278,6 +262,24 @@ ListKeysResult listKeys(String volumeName, int maxKeys) throws IOException; + /** + * List trash allows the user to list the keys that were marked as deleted, + * but not actually deleted by Ozone Manager. This allows a user to recover + * keys within a configurable window. + * @param volumeName - The volume name, which can also be a wild card + * using '*'. + * @param bucketName - The bucket name, which can also be a wild card + * using '*'. + * @param startKeyName - List keys from a specific key name. + * @param keyPrefix - List keys using a specific prefix. + * @param maxKeys - The number of keys to be returned. This must be below + * the cluster level set by admins. + * @return The list of keys that are deleted from the deleted table. + * @throws IOException + */ + List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, int maxKeys) throws IOException; + /** * Returns snapshot info for volume/bucket snapshot path. 
* @param volumeName volume name @@ -302,6 +304,18 @@ ListSnapshotResponse listSnapshot( String volumeName, String bucketName, String snapshotPrefix, String prevSnapshot, int maxListResult) throws IOException; + /** + * Recover trash allows the user to recover the keys + * that were marked as deleted, but not actually deleted by Ozone Manager. + * @param volumeName - The volume name. + * @param bucketName - The bucket name. + * @param keyName - The key user want to recover. + * @param destinationBucket - The bucket user want to recover to. + * @return The result of recovering operation is success or not. + */ + boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) throws IOException; + /** * Returns a list of volumes owned by a given user; if user is null, returns * all volumes. @@ -647,7 +661,7 @@ String getMultipartKey(long volumeId, long bucketId, long getBucketId(String volume, String bucket) throws IOException; /** - * Returns {@code List} for a key in the deletedTable. + * Returns List<{@link BlockGroup}> for a key in the deletedTable. * @param deletedKey - key to be purged from the deletedTable * @return {@link BlockGroup} */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index e4174efcfcc..68429c36d08 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -43,7 +43,7 @@ * BucketManager uses MetadataDB to store bucket level information. * Keys used in BucketManager for storing data into MetadataDB * for BucketInfo: - * {volume/bucket} -> bucketInfo + * {volume/bucket} -> bucketInfo */ public class BucketManagerImpl implements BucketManager { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java index bb682508524..a83304ade45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java @@ -63,7 +63,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY; /** - * Separated network server for gRPC transport OzoneManagerService s3g->OM. + * Separated network server for gRPC transport OzoneManagerService s3g->OM. 
*/ public class GrpcOzoneManagerServer { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index a0f3053d731..7a3312c0685 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,7 +18,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -29,7 +28,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; @@ -37,7 +36,6 @@ import java.io.IOException; import java.time.Duration; -import java.util.ArrayList; import java.util.List; /** @@ -49,6 +47,7 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl { * Start key manager. * * @param configuration + * @throws IOException */ void start(OzoneConfiguration configuration); @@ -108,6 +107,24 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException; + /** + * List trash allows the user to list the keys that were marked as deleted, + * but not actually deleted by Ozone Manager. This allows a user to recover + * keys within a configurable window. + * @param volumeName - The volume name, which can also be a wild card + * using '*'. + * @param bucketName - The bucket name, which can also be a wild card + * using '*'. + * @param startKeyName - List keys from a specific key name. + * @param keyPrefix - List keys using a specific prefix. + * @param maxKeys - The number of keys to be returned. This must be below + * the cluster level set by admins. + * @return The list of keys that are deleted from the deleted table. + * @throws IOException + */ + List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, int maxKeys) throws IOException; + /** * Returns a PendingKeysDeletion. It has a list of pending deletion key info * that ups to the given count.Each entry is a {@link BlockGroup}, which @@ -121,29 +138,6 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, */ PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException; - /** - * Returns a list rename entries from the snapshotRenamedTable. - * - * @param size max number of keys to return. - * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the - * underlying metadataManager. - * @throws IOException - */ - List> getRenamesKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException; - - - /** - * Returns a list deleted entries from the deletedTable. - * - * @param size max number of keys to return. 
- * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the - * underlying metadataManager. - * @throws IOException - */ - List>> getDeletedKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException; - /** * Returns the names of up to {@code count} open keys whose age is * greater than or equal to {@code expireThreshold}. @@ -241,26 +235,6 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, */ Table.KeyValue getPendingDeletionDir() throws IOException; - /** - * Returns an iterator for pending deleted directories. - * @throws IOException - */ - TableIterator> getDeletedDirEntries( - String volume, String bucket) throws IOException; - - default List> getDeletedDirEntries(String volume, String bucket, int size) - throws IOException { - List> deletedDirEntries = new ArrayList<>(size); - try (TableIterator> iterator = - getDeletedDirEntries(volume, bucket)) { - while (deletedDirEntries.size() < size && iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - deletedDirEntries.add(Table.newKeyValue(kv.getKey(), kv.getValue())); - } - return deletedDirEntries; - } - } - /** * Returns all sub directories under the given parent directory. * @@ -288,7 +262,7 @@ List getPendingDeletionSubFiles(long volumeId, * Returns the instance of Directory Deleting Service. * @return Background service. */ - DirectoryDeletingService getDirDeletingService(); + BackgroundService getDirDeletingService(); /** * Returns the instance of Open Key Cleanup Service. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9bdbc70fb99..2cb55135294 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -37,7 +37,6 @@ import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.TimeUnit; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -86,8 +85,8 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -123,6 +122,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; @@ -143,8 +144,6 @@ import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT; @@ -183,6 +182,7 @@ public class KeyManagerImpl implements KeyManager { private final ScmClient scmClient; private final OMMetadataManager metadataManager; private final long scmBlockSize; + private final int listTrashKeysMax; private final OzoneBlockTokenSecretManager secretManager; private final boolean grpcBlockTokenEnabled; @@ -193,7 +193,7 @@ public class KeyManagerImpl implements KeyManager { private final KeyProviderCryptoExtension kmsProvider; private final boolean enableFileSystemPaths; - private DirectoryDeletingService dirDeletingService; + private BackgroundService dirDeletingService; private final OMPerformanceMetrics metrics; private BackgroundService openKeyCleanupService; @@ -218,6 +218,9 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, this.grpcBlockTokenEnabled = conf.getBoolean( HDDS_BLOCK_TOKEN_ENABLED, HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); + this.listTrashKeysMax = conf.getInt( + OZONE_CLIENT_LIST_TRASH_KEYS_MAX, + OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT); this.enableFileSystemPaths = conf.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); @@ -232,8 +235,6 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, @Override public void start(OzoneConfiguration configuration) { - boolean isSnapshotDeepCleaningEnabled = configuration.getBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, - OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT); if (keyDeletingService == null) { long blockDeleteInterval = configuration.getTimeDuration( OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -245,7 +246,7 @@ public void start(OzoneConfiguration configuration) { TimeUnit.MILLISECONDS); keyDeletingService = new KeyDeletingService(ozoneManager, scmClient.getBlockClient(), this, blockDeleteInterval, - serviceTimeout, configuration, isSnapshotDeepCleaningEnabled); + serviceTimeout, configuration); keyDeletingService.start(); } @@ -311,14 +312,14 @@ public void start(OzoneConfiguration configuration) { try { snapshotDeletingService = new SnapshotDeletingService( snapshotServiceInterval, snapshotServiceTimeout, - ozoneManager); + ozoneManager, scmClient.getBlockClient()); snapshotDeletingService.start(); } catch (IOException e) { LOG.error("Error starting Snapshot Deleting Service", e); } } - if (isSnapshotDeepCleaningEnabled && snapshotDirectoryCleaningService == null && + if (snapshotDirectoryCleaningService == null && ozoneManager.isFilesystemSnapshotEnabled()) { long dirDeleteInterval = configuration.getTimeDuration( OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, @@ -659,6 +660,21 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return listKeysResult; } + @Override + public List listTrash(String volumeName, + String bucketName, String 
startKeyName, String keyPrefix, + int maxKeys) throws IOException { + + Preconditions.checkNotNull(volumeName); + Preconditions.checkNotNull(bucketName); + Preconditions.checkArgument(maxKeys <= listTrashKeysMax, + "The max keys limit specified is not less than the cluster " + + "allowed maximum limit."); + + return metadataManager.listTrash(volumeName, bucketName, + startKeyName, keyPrefix, maxKeys); + } + @Override public PendingKeysDeletion getPendingDeletionKeys(final int count) throws IOException { @@ -668,60 +684,6 @@ public PendingKeysDeletion getPendingDeletionKeys(final int count) .getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager()); } - private List> getTableEntries(String startKey, - TableIterator> tableIterator, - Function valueFunction, int size) throws IOException { - List> entries = new ArrayList<>(); - /* Seek to the start key if it not null. The next key in queue is ensured to start with the bucket - prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this. - */ - if (startKey != null) { - tableIterator.seek(startKey); - tableIterator.seekToFirst(); - } - int currentCount = 0; - while (tableIterator.hasNext() && currentCount < size) { - Table.KeyValue kv = tableIterator.next(); - if (kv != null) { - entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue()))); - currentCount++; - } - } - return entries; - } - - private Optional getBucketPrefix(String volumeName, String bucketName, boolean isFSO) throws IOException { - // Bucket prefix would be empty if both volume & bucket is empty i.e. either null or "". - if (StringUtils.isEmpty(volumeName) && StringUtils.isEmpty(bucketName)) { - return Optional.empty(); - } else if (StringUtils.isEmpty(bucketName) || StringUtils.isEmpty(volumeName)) { - throw new IOException("One of volume : " + volumeName + ", bucket: " + bucketName + " is empty." + - " Either both should be empty or none of the arguments should be empty"); - } - return isFSO ? 
Optional.of(metadataManager.getBucketKeyPrefixFSO(volumeName, bucketName)) : - Optional.of(metadataManager.getBucketKeyPrefix(volumeName, bucketName)); - } - - @Override - public List> getRenamesKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, false); - try (TableIterator> - renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { - return getTableEntries(startKey, renamedKeyIter, Function.identity(), size); - } - } - - @Override - public List>> getDeletedKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, false); - try (TableIterator> - delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { - return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size); - } - } - @Override public ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, BucketLayout bucketLayout, Duration leaseThreshold) throws IOException { @@ -748,7 +710,7 @@ public KeyDeletingService getDeletingService() { } @Override - public DirectoryDeletingService getDirDeletingService() { + public BackgroundService getDirDeletingService() { return dirDeletingService; } @@ -783,7 +745,8 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - + + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { @@ -1384,6 +1347,7 @@ private OmKeyInfo createFakeDirIfShould(String volume, String bucket, return null; } + private OzoneFileStatus getOzoneFileStatusFSO(OmKeyArgs args, String clientAddress, boolean skipFileNotFoundError) throws IOException { final String volumeName = args.getVolumeName(); @@ -1842,13 +1806,17 @@ private List buildFinalStatusList( } fileStatusFinalList.add(fileStatus); } + return sortPipelineInfo(fileStatusFinalList, keyInfoList, omKeyArgs, clientAddress); } + private List sortPipelineInfo( List fileStatusFinalList, List keyInfoList, OmKeyArgs omKeyArgs, String clientAddress) throws IOException { + + if (omKeyArgs.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -2030,13 +1998,6 @@ public Table.KeyValue getPendingDeletionDir() return null; } - @Override - public TableIterator> getDeletedDirEntries( - String volume, String bucket) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, true); - return metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse("")); - } - @Override public List getPendingDeletionSubDirs(long volumeId, long bucketId, OmKeyInfo parentInfo, long numEntries) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 36edda8941d..86d8352697a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -43,7 +43,7 @@ /** * Common class to do listing of resources after merging - * rocksDB table cache and actual table. + * rocksDB table cache & actual table. 
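A minimal sketch of the merged-listing idea described in that comment, using hypothetical names rather than the actual ListIterator internals: an in-memory cache overlay shadows the persisted table, and a null cache value stands in for a delete that has not yet been flushed.

import java.util.Map;
import java.util.TreeMap;

final class MergedListingSketch {
  // Returns the keys a listing should see: table contents overridden by cache entries.
  static TreeMap<String, String> merge(TreeMap<String, String> table,
                                       Map<String, String> cacheOverlay) {
    TreeMap<String, String> merged = new TreeMap<>(table);
    cacheOverlay.forEach((key, value) -> {
      if (value == null) {
        merged.remove(key);      // a null cache value models a delete still pending flush
      } else {
        merged.put(key, value);  // a cache entry shadows whatever the table holds
      }
    });
    return merged;
  }
}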
*/ public class ListIterator { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index cbe5205c10b..1c0ec78cfb2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -61,6 +61,7 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeletes; private @Metric MutableCounterLong numBucketLists; private @Metric MutableCounterLong numKeyLists; + private @Metric MutableCounterLong numTrashKeyLists; private @Metric MutableCounterLong numVolumeLists; private @Metric MutableCounterLong numKeyCommits; private @Metric MutableCounterLong numKeyHSyncs; @@ -119,6 +120,7 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeleteFails; private @Metric MutableCounterLong numBucketListFails; private @Metric MutableCounterLong numKeyListFails; + private @Metric MutableCounterLong numTrashKeyListFails; private @Metric MutableCounterLong numVolumeListFails; private @Metric MutableCounterLong numKeyCommitFails; private @Metric MutableCounterLong numBlockAllocationFails; @@ -418,6 +420,11 @@ public void incNumKeyLists() { numKeyLists.incr(); } + public void incNumTrashKeyLists() { + numKeyOps.incr(); + numTrashKeyLists.incr(); + } + public void incNumVolumeLists() { numVolumeOps.incr(); numVolumeLists.incr(); @@ -829,6 +836,10 @@ public void incNumKeyListFails() { numKeyListFails.incr(); } + public void incNumTrashKeyListFails() { + numTrashKeyListFails.incr(); + } + public void incNumVolumeListFails() { numVolumeListFails.incr(); } @@ -983,6 +994,11 @@ public long getNumKeyLists() { return numKeyLists.value(); } + @VisibleForTesting + public long getNumTrashKeyLists() { + return numTrashKeyLists.value(); + } + @VisibleForTesting public long getNumGetServiceLists() { return numGetServiceLists.value(); @@ -1083,6 +1099,11 @@ public long getNumKeyListFails() { return numKeyListFails.value(); } + @VisibleForTesting + public long getNumTrashKeyListFails() { + return numTrashKeyListFails.value(); + } + @VisibleForTesting public long getNumFSOps() { return numFSOps.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java index 2c66dd5035e..f68789b5394 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java @@ -129,7 +129,7 @@ boolean isTenantAdmin(UserGroupInformation callerUgi, String tenantId, boolean delegated); /** - * List all the user and accessIDs of all users that belong to this Tenant. + * List all the user & accessIDs of all users that belong to this Tenant. * Note this read is unprotected. 
See OzoneManager#listUserInTenant * @param tenantID * @return List of users diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java index a5954485bbd..1d25a49fc56 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java @@ -246,6 +246,7 @@ private void checkAcquiredAuthorizerWriteLock() throws OMException { * @param tenantId tenant name * @param userRoleName user role name * @param adminRoleName admin role name + * @return Tenant * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 4873a7db491..22d2b1e50b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -34,6 +34,7 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -139,8 +140,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, *

    * OM DB Schema: * - *

    -   * {@code
    +   *
        * Common Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -161,10 +161,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | transactionInfoTable| #TRANSACTIONINFO -> OMTransactionInfo          |
        * |----------------------------------------------------------------------|
    -   * }
    -   * 
    - *
    -   * {@code
    +   *
        * Multi-Tenant Tables:
        * |----------------------------------------------------------------------|
        * | tenantStateTable          | tenantId -> OmDBTenantState              |
    @@ -173,10 +170,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | principalToAccessIdsTable | userPrincipal -> OmDBUserPrincipalInfo   |
        * |----------------------------------------------------------------------|
    -   * }
    -   * 
    - *
    -   * {@code
    +   *
    +   *
        * Simple Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -187,10 +182,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
        * |----------------------------------------------------------------------|
    -   * }
    -   * 
    - *
    -   * {@code
    +   *
        * Prefix Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family   |        VALUE                                      |
    @@ -204,10 +196,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |  deletedDirTable | /volumeId/bucketId/parentId/dirName/objectId ->   |
        * |                  |                                      KeyInfo      |
        * |----------------------------------------------------------------------|
    -   * }
    -   * 
    - *
    -   * {@code
    +   *
        * Snapshot Tables:
        * |-------------------------------------------------------------------------|
        * |  Column Family        |        VALUE                                    |
    @@ -221,8 +210,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |-------------------------------------------------------------------------|
        * | compactionLogTable    | dbTrxId-compactionTime -> compactionLogEntry    |
        * |-------------------------------------------------------------------------|
    -   * }
    -   * 
    */ public static final String USER_TABLE = "userTable"; @@ -837,7 +824,7 @@ public String getUserKey(String user) { /** * Given a volume and bucket, return the corresponding DB key. * - * @param volume - Volume name + * @param volume - User name * @param bucket - Bucket name */ @Override @@ -851,22 +838,6 @@ public String getBucketKey(String volume, String bucket) { return builder.toString(); } - /** - * {@inheritDoc} - */ - @Override - public String getBucketKeyPrefix(String volume, String bucket) { - return getOzoneKey(volume, bucket, OM_KEY_PREFIX); - } - - /** - * {@inheritDoc} - */ - @Override - public String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException { - return getOzoneKeyFSO(volume, bucket, OM_KEY_PREFIX); - } - @Override public String getOzoneKey(String volume, String bucket, String key) { StringBuilder builder = new StringBuilder() @@ -1414,6 +1385,15 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return new ListKeysResult(result, isTruncated); } + // TODO: HDDS-2419 - Complete stub below for core logic + @Override + public List listTrash(String volumeName, String bucketName, + String startKeyName, String keyPrefix, int maxKeys) throws IOException { + + List deletedKeys = new ArrayList<>(); + return deletedKeys; + } + @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { @@ -1490,6 +1470,18 @@ public ListSnapshotResponse listSnapshot( return new ListSnapshotResponse(snapshotInfos, lastSnapshot); } + @Override + public boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) throws IOException { + + /* TODO: HDDS-2425 and HDDS-2426 + core logic stub would be added in later patch. + */ + + boolean recoverOperation = true; + return recoverOperation; + } + /** * @param userName volume owner, null for listing all volumes. */ @@ -1624,22 +1616,11 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, String[] keySplit = kv.getKey().split(OM_KEY_PREFIX); String bucketKey = getBucketKey(keySplit[1], keySplit[2]); OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); - // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. - SnapshotInfo previousSnapshotInfo = bucketInfo == null ? null : - SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); - // previous snapshot is not active or it has not been flushed to disk then don't process the key in this - // iteration. - if (previousSnapshotInfo != null && - (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), - previousSnapshotInfo))) { - continue; - } + // Get the latest snapshot in snapshot path. - try (ReferenceCounted rcLatestSnapshot = previousSnapshotInfo == null ? 
null : - omSnapshotManager.getSnapshot(previousSnapshotInfo.getVolumeName(), - previousSnapshotInfo.getBucketName(), previousSnapshotInfo.getName())) { + try (ReferenceCounted + rcLatestSnapshot = getLatestActiveSnapshot( + keySplit[1], keySplit[2], omSnapshotManager)) { // Multiple keys with the same path can be queued in one DB entry RepeatedOmKeyInfo infoList = kv.getValue(); @@ -1716,24 +1697,17 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, List notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList(); - // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. - SnapshotInfo newPreviousSnapshotInfo = bucketInfo == null ? null : - SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); - // Check if the previous snapshot in the chain hasn't changed. - if (Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), - Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) { - // If all the versions are not reclaimable, then do nothing. - if (notReclaimableKeyInfoList.size() > 0 && - notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keysToModify.put(kv.getKey(), notReclaimableKeyInfo); - } - if (notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keyBlocksList.addAll(blockGroupList); - } + // If all the versions are not reclaimable, then do nothing. + if (notReclaimableKeyInfoList.size() > 0 && + notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keysToModify.put(kv.getKey(), notReclaimableKeyInfo); + } + + if (notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keyBlocksList.addAll(blockGroupList); } } } @@ -1750,6 +1724,55 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, delOmKeyInfo != null; } + /** + * Get the latest OmSnapshot for a snapshot path. + */ + public ReferenceCounted getLatestActiveSnapshot( + String volumeName, String bucketName, + OmSnapshotManager snapshotManager) + throws IOException { + + String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; + Optional latestPathSnapshot = Optional.ofNullable( + snapshotChainManager.getLatestPathSnapshotId(snapshotPath)); + + Optional snapshotInfo = Optional.empty(); + + while (latestPathSnapshot.isPresent()) { + Optional snapTableKey = latestPathSnapshot + .map(uuid -> snapshotChainManager.getTableKey(uuid)); + + snapshotInfo = snapTableKey.isPresent() ? + Optional.ofNullable(getSnapshotInfoTable().get(snapTableKey.get())) : + Optional.empty(); + + if (snapshotInfo.isPresent() && snapshotInfo.get().getSnapshotStatus() == + SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + break; + } + + // Update latestPathSnapshot if current snapshot is deleted. + if (snapshotChainManager.hasPreviousPathSnapshot(snapshotPath, + latestPathSnapshot.get())) { + latestPathSnapshot = Optional.ofNullable(snapshotChainManager + .previousPathSnapshot(snapshotPath, latestPathSnapshot.get())); + } else { + latestPathSnapshot = Optional.empty(); + } + } + + Optional> rcOmSnapshot = + snapshotInfo.isPresent() ? + Optional.ofNullable( + snapshotManager.getSnapshot(volumeName, + bucketName, + snapshotInfo.get().getName()) + ) : + Optional.empty(); + + return rcOmSnapshot.orElse(null); + } + /** * Decide whether the open key is a multipart upload related key. 
* @param openKeyInfo open key related to multipart upload diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index dde5b22e793..0d17851ed1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; @@ -99,7 +98,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; @@ -676,73 +674,19 @@ private ReferenceCounted getSnapshot(String snapshotTableKey, boolea } /** - * Checks if the last transaction performed on the snapshot has been flushed to disk. - * @param metadataManager Metadatamanager of Active OM. - * @param snapshotTableKey table key corresponding to snapshot in snapshotInfoTable. - * @return True if the changes have been flushed to DB otherwise false - * @throws IOException + * Returns true if the snapshot is in given status. + * @param key DB snapshot table key + * @param status SnapshotStatus + * @return true if the snapshot is in given status, false otherwise */ - public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, String snapshotTableKey) + public boolean isSnapshotStatus(String key, + SnapshotInfo.SnapshotStatus status) throws IOException { - // Need this info from cache since the snapshot could have been updated only on cache and not on disk. - SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotTableKey); - return areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); + return getSnapshotInfo(key).getSnapshotStatus().equals(status); } - /** - * Checks if the last transaction performed on the snapshot has been flushed to disk. - * @param metadataManager Metadatamanager of Active OM. - * @param snapshotInfo SnapshotInfo value. - * @return True if the changes have been flushed to DB otherwise false. It would return true if the snapshot - * provided is null meaning the snapshot doesn't exist. - * @throws IOException - */ - public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, SnapshotInfo snapshotInfo) - throws IOException { - if (snapshotInfo != null) { - TransactionInfo snapshotTransactionInfo = snapshotInfo.getLastTransactionInfo() != null ? 
- TransactionInfo.fromByteString(snapshotInfo.getLastTransactionInfo()) : null; - TransactionInfo omTransactionInfo = TransactionInfo.readTransactionInfo(metadataManager); - // If transactionInfo field is null then return true to keep things backward compatible. - return snapshotTransactionInfo == null || omTransactionInfo.compareTo(snapshotTransactionInfo) >= 0; - } - return true; - } - - - /** - * Returns OmSnapshot object and skips active check. - * This should only be used for API calls initiated by background service e.g. purgeKeys, purgeSnapshot, - * snapshotMoveDeletedKeys, and SetSnapshotProperty. - */ - public ReferenceCounted getSnapshot(UUID snapshotId) throws IOException { - return snapshotCache.get(snapshotId); - } - - /** - * Returns snapshotInfo from cache if it is present in cache, otherwise it checks RocksDB and return value from there. - * ################################################# - * NOTE: THIS SHOULD BE USED BY SNAPSHOT CACHE ONLY. - * ################################################# - * Sometimes, the follower OM node may be lagging that it gets purgeKeys or snapshotMoveDeletedKeys from a Snapshot, - * and purgeSnapshot for the same Snapshot one after another. And purgeSnapshot's validateAndUpdateCache gets - * executed before doubleBuffer flushes purgeKeys or snapshotMoveDeletedKeys from that Snapshot. - * This should not be a case on the leader node because SnapshotDeletingService checks that deletedTable and - * deletedDirectoryTable in DB don't have entries for the bucket before it sends a purgeSnapshot on a snapshot. - * If that happens, and we just look into the cache, the addToBatch operation will fail when it tries to open - * the DB and purgeKeys from the Snapshot because snapshot is already purged from the SnapshotInfoTable cache. - * Hence, it is needed to look into the table to make sure that snapshot exists somewhere either in cache or in DB. 
- */ - private SnapshotInfo getSnapshotInfo(String snapshotKey) throws IOException { - SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotKey); - - if (snapshotInfo == null) { - snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().getSkipCache(snapshotKey); - } - if (snapshotInfo == null) { - throw new OMException("Snapshot '" + snapshotKey + "' is not found.", INVALID_SNAPSHOT_ERROR); - } - return snapshotInfo; + public SnapshotInfo getSnapshotInfo(String key) throws IOException { + return SnapshotUtils.getSnapshotInfo(ozoneManager, key); } public static String getSnapshotPrefix(String snapshotName) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index e2acafdd242..a514262cae2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -106,7 +106,6 @@ import org.apache.hadoop.ozone.om.s3.S3SecretCacheProvider; import org.apache.hadoop.ozone.om.s3.S3SecretStoreProvider; import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; -import org.apache.hadoop.ozone.om.service.QuotaRepairTask; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; @@ -173,6 +172,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; @@ -437,6 +437,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private List ratisReporterList = null; private KeyProviderCryptoExtension kmsProvider; + private OzoneFsServerDefaults serverDefaults; private final OMLayoutVersionManager versionManager; private final ReplicationConfigValidator replicationConfigValidator; @@ -654,6 +655,14 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) kmsProvider = null; LOG.error("Fail to create Key Provider"); } + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); + URI keyProviderUri = KMSUtil.getKeyProviderUri( + hadoopConfig, + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); + String keyProviderUriStr = + (keyProviderUri != null) ? keyProviderUri.toString() : null; + serverDefaults = new OzoneFsServerDefaults(keyProviderUriStr); if (secConfig.isSecurityEnabled()) { omComponent = OM_DAEMON + "-" + omId; HddsProtos.OzoneManagerDetailsProto omInfo = @@ -782,9 +791,8 @@ private void setInstanceVariablesFromConf() { * * @param conf OzoneConfiguration * @return OM instance - * @throws IOException AuthenticationException in case OM instance - * creation fails, - * @throws AuthenticationException + * @throws IOException, AuthenticationException in case OM instance + * creation fails. 
*/ public static OzoneManager createOm(OzoneConfiguration conf) throws IOException, AuthenticationException { @@ -866,13 +874,7 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); - // If authorizer is not initialized or the authorizer is Native - // re-initialize the authorizer, else for non-native authorizer - // like ranger we can reuse previous value if it is initialized - if (null == accessAuthorizer || accessAuthorizer.isNative()) { - accessAuthorizer = OzoneAuthorizerFactory.forOM(this); - } - + accessAuthorizer = OzoneAuthorizerFactory.forOM(this); omMetadataReader = new OmMetadataReader(keyManager, prefixManager, this, LOG, AUDIT, metrics, accessAuthorizer); // Active DB's OmMetadataReader instance does not need to be reference @@ -2063,7 +2065,6 @@ private void addOMNodeToPeers(String newOMNodeId) throws IOException { } catch (IOException e) { LOG.error("{}: Couldn't add OM {} to peer list.", getOMNodeId(), newOMNodeId); - return; } if (omRatisSnapshotProvider == null) { @@ -2968,6 +2969,39 @@ public ListKeysLightResult listKeysLight(String volumeName, return new ListKeysLightResult(basicKeysList, listKeysResult.isTruncated()); } + @Override + public List listTrash(String volumeName, + String bucketName, String startKeyName, String keyPrefix, int maxKeys) + throws IOException { + boolean auditSuccess = true; + Map auditMap = buildAuditMap(volumeName); + auditMap.put(OzoneConsts.BUCKET, bucketName); + auditMap.put(OzoneConsts.START_KEY, startKeyName); + auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); + auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); + try { + if (isAclEnabled) { + omMetadataReader.checkAcls(ResourceType.BUCKET, + StoreType.OZONE, ACLType.LIST, + volumeName, bucketName, keyPrefix); + } + metrics.incNumTrashKeyLists(); + return keyManager.listTrash(volumeName, bucketName, + startKeyName, keyPrefix, maxKeys); + } catch (IOException ex) { + metrics.incNumTrashKeyListFails(); + auditSuccess = false; + AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_TRASH, + auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_TRASH, + auditMap)); + } + } + } + @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { @@ -3137,15 +3171,6 @@ public List getServiceList() throws IOException { .setType(ServicePort.Type.RPC) .setValue(omRpcAddress.getPort()) .build()); - Configuration hadoopConfig = - LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); - URI keyProviderUri = KMSUtil.getKeyProviderUri( - hadoopConfig, - CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); - String keyProviderUriStr = - (keyProviderUri != null) ? 
keyProviderUri.toString() : null; - omServiceInfoBuilder.setServerDefaults( - new OzoneFsServerDefaults(keyProviderUriStr)); if (httpServer != null && httpServer.getHttpAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() @@ -4756,15 +4781,8 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override - public String getQuotaRepairStatus() throws IOException { - checkAdminUserPrivilege("quota repair status"); - return QuotaRepairTask.getStatus(); - } - - @Override - public void startQuotaRepair(List buckets) throws IOException { - checkAdminUserPrivilege("start quota repair"); - new QuotaRepairTask(this).repair(buckets); + public OzoneFsServerDefaults getServerDefaults() { + return serverDefaults; } /** @@ -5024,11 +5042,4 @@ public void awaitDoubleBufferFlush() throws InterruptedException { getOmServerProtocol().awaitDoubleBufferFlush(); } } - - public void checkFeatureEnabled(OzoneManagerVersion feature) throws OMException { - String disabledFeatures = configuration.get(OMConfigKeys.OZONE_OM_FEATURES_DISABLED, ""); - if (disabledFeatures.contains(feature.name())) { - throw new OMException("Feature disabled: " + feature, OMException.ResultCodes.NOT_SUPPORTED_OPERATION); - } - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java index c693e529580..5a4ff643157 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java @@ -51,8 +51,6 @@ private OzoneManagerUtils() { * OzoneManagerStateMachine#runCommand function and ensures sequential * execution path. * Below is the call trace to perform OM client request operation: - *
    -   * {@code
        * OzoneManagerStateMachine#applyTransaction ->
        * OzoneManagerStateMachine#runCommand ->
        * OzoneManagerRequestHandler#handleWriteRequest ->
    @@ -62,8 +60,6 @@ private OzoneManagerUtils() {
        * OzoneManagerUtils#getBucketLayout ->
        * OzoneManagerUtils#getOmBucketInfo ->
        * omMetadataManager().getBucketTable().get(buckKey)
    -   * }
    -   * 
    */ public static OmBucketInfo getBucketInfo(OMMetadataManager metaMgr, @@ -168,8 +164,12 @@ private static OmBucketInfo resolveBucketInfoLink( * buck-src has the actual BucketLayout that will be used by the * links. */ - return resolveBucketInfoLink(metadataManager, buckInfo.getSourceVolume(), - buckInfo.getSourceBucket(), visited); + try { + return resolveBucketInfoLink(metadataManager, + buckInfo.getSourceVolume(), buckInfo.getSourceBucket(), visited); + } catch (IOException e) { + throw e; + } } return buckInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index e4102665d62..60353590e75 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -24,10 +24,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.NoSuchElementException; @@ -58,7 +56,6 @@ public class SnapshotChainManager { private final ConcurrentMap snapshotIdToTableKey; private UUID latestGlobalSnapshotId; private final boolean snapshotChainCorrupted; - private UUID oldestGlobalSnapshotId; public SnapshotChainManager(OMMetadataManager metadataManager) { globalSnapshotChain = Collections.synchronizedMap(new LinkedHashMap<>()); @@ -107,8 +104,6 @@ private void addSnapshotGlobal(UUID snapshotID, UUID prevGlobalID) // On add snapshot, set previous snapshot entry nextSnapshotID = // snapshotID globalSnapshotChain.get(prevGlobalID).setNextSnapshotId(snapshotID); - } else { - oldestGlobalSnapshotId = snapshotID; } globalSnapshotChain.put(snapshotID, @@ -176,6 +171,7 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { // for node removal UUID next = globalSnapshotChain.get(snapshotID).getNextSnapshotId(); UUID prev = globalSnapshotChain.get(snapshotID).getPreviousSnapshotId(); + if (prev != null && !globalSnapshotChain.containsKey(prev)) { throw new IOException(String.format( "Global snapshot chain corruption. " + @@ -201,9 +197,6 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { if (latestGlobalSnapshotId.equals(snapshotID)) { latestGlobalSnapshotId = prev; } - if (snapshotID.equals(oldestGlobalSnapshotId)) { - oldestGlobalSnapshotId = next; - } return true; } else { // snapshotID not found in snapshot chain, log warning and return @@ -369,16 +362,13 @@ public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { public synchronized boolean deleteSnapshot(SnapshotInfo snapshotInfo) throws IOException { validateSnapshotChain(); - return deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && - deleteSnapshotPath(snapshotInfo.getSnapshotPath(), snapshotInfo.getSnapshotId()); - } - - /** - * Remove the snapshot from snapshotIdToSnapshotTableKey map. 
- */ - public synchronized void removeFromSnapshotIdToTable(UUID snapshotId) throws IOException { - validateSnapshotChain(); - snapshotIdToTableKey.remove(snapshotId); + boolean status = deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && + deleteSnapshotPath(snapshotInfo.getSnapshotPath(), + snapshotInfo.getSnapshotId()); + if (status) { + snapshotIdToTableKey.remove(snapshotInfo.getSnapshotId()); + } + return status; } /** @@ -389,42 +379,6 @@ public UUID getLatestGlobalSnapshotId() throws IOException { return latestGlobalSnapshotId; } - /** - * Get oldest of global snapshot in snapshot chain. - */ - public UUID getOldestGlobalSnapshotId() throws IOException { - validateSnapshotChain(); - return oldestGlobalSnapshotId; - } - - public Iterator iterator(final boolean reverse) throws IOException { - validateSnapshotChain(); - return new Iterator() { - private UUID currentSnapshotId = reverse ? getLatestGlobalSnapshotId() : getOldestGlobalSnapshotId(); - @Override - public boolean hasNext() { - return currentSnapshotId != null; - } - - @Override - public UUID next() { - try { - UUID prevSnapshotId = currentSnapshotId; - if (reverse && hasPreviousGlobalSnapshot(currentSnapshotId) || - !reverse && hasNextGlobalSnapshot(currentSnapshotId)) { - currentSnapshotId = - reverse ? previousGlobalSnapshot(currentSnapshotId) : nextGlobalSnapshot(currentSnapshotId); - } else { - currentSnapshotId = null; - } - return prevSnapshotId; - } catch (IOException e) { - throw new UncheckedIOException("Error while getting next snapshot for " + currentSnapshotId, e); - } - } - }; - } - /** * Get latest path snapshot in snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index bd462224e9d..6e1c9da34cb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om; import com.google.common.base.Preconditions; +import com.google.protobuf.RpcController; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -34,12 +35,15 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,6 +69,8 @@ */ public class TrashOzoneFileSystem extends FileSystem { + private static final RpcController NULL_RPC_CONTROLLER = null; + private static final int OZONE_FS_ITERATE_BATCH_SIZE = 100; private static final int OZONE_MAX_LIST_KEYS_SIZE = 10000; @@ -91,15 +97,34 @@ public TrashOzoneFileSystem(OzoneManager ozoneManager) throws IOException { ozoneConfiguration = OzoneConfiguration.of(getConf()); } + private 
RaftClientRequest getRatisRequest( + OzoneManagerProtocolProtos.OMRequest omRequest) { + return RaftClientRequest.newBuilder() + .setClientId(CLIENT_ID) + .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) + .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) + .setCallId(runCount.getAndIncrement()) + .setMessage( + Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + } + private void submitRequest(OzoneManagerProtocolProtos.OMRequest omRequest) throws Exception { ozoneManager.getMetrics().incNumTrashWriteRequests(); if (ozoneManager.isRatisEnabled()) { - // perform preExecute as ratis submit do no perform preExecute - OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); + OMClientRequest omClientRequest = + OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); omRequest = omClientRequest.preExecute(ozoneManager); + RaftClientRequest req = getRatisRequest(omRequest); + ozoneManager.getOmRatisServer().submitRequest(omRequest, req); + } else { + ozoneManager.getOmServerProtocol(). + submitRequest(NULL_RPC_CONTROLLER, omRequest); } - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.getAndIncrement()); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java index 2d59c6259ad..1dcb0f0cd61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java @@ -43,7 +43,7 @@ public interface AuthorizerLock { /** * @return stamp that can be passed to - * {@link #validateOptimisticRead(long)} to check if a write lock was + * {@link this#validateOptimisticRead(long)} to check if a write lock was * acquired since the stamp was issued. * @throws IOException If an ongoing write prevents the lock from moving to * the read state for longer than the timeout. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 753088183b5..a6fcc40dda1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; @@ -217,8 +216,8 @@ private OzoneManagerDoubleBuffer(Builder b) { } public OzoneManagerDoubleBuffer start() { - isRunning.set(true); daemon.start(); + isRunning.set(true); return this; } @@ -427,12 +426,8 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * in RocksDB callback flush. 
If multiple operations are flushed in one * specific batch, we are not sure at the flush of which specific operation * the callback is coming. - * PurgeSnapshot is also considered a barrier, since purgeSnapshot transaction on a standalone basis is an - * idempotent operation. Once the directory gets deleted the previous transactions that have been performed on the - * snapshotted rocksdb would start failing on replay since those transactions have not been committed but the - * directory could have been partially deleted/ fully deleted. This could also lead to inconsistencies in the DB - * reads from the purged rocksdb if operations are not performed consciously. - * There could be a possibility of race condition that is exposed to rocksDB behaviour for the batch. + * There could be a possibility of race condition that is exposed to rocksDB + * behaviour for the batch. * Hence, we treat createSnapshot as separate batch flush. *

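A rough illustration of that splitting rule (not the production code; a String stands in for an OMResponse and the literal "snapshot" marks a createSnapshot-style response): a snapshot entry always opens a new sub-batch, and so does the entry that follows one, so every snapshot entry is flushed on its own.

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

final class SplitAtSnapshotSketch {
  static List<List<String>> split(List<String> readyBuffer) {
    List<List<String>> batches = new ArrayList<>();
    String previous = null;
    for (String current : readyBuffer) {
      boolean barrier = "snapshot".equals(current) || "snapshot".equals(previous);
      if (batches.isEmpty() || barrier) {
        batches.add(new LinkedList<>());   // start a new sub-batch at every barrier
      }
      batches.get(batches.size() - 1).add(current);
      previous = current;
    }
    return batches;
  }
  // split([r1, r2, snapshot, r3]) yields [[r1, r2], [snapshot], [r3]]
}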
    * e.g. requestBuffer = [request1, request2, snapshotRequest1, @@ -440,17 +435,19 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * response = [[request1, request2], [snapshotRequest1], [request3], * [snapshotRequest2], [request4]] */ - private synchronized List> splitReadyBufferAtCreateSnapshot() { + private List> splitReadyBufferAtCreateSnapshot() { final List> response = new ArrayList<>(); + OMResponse previousOmResponse = null; for (final Entry entry : readyBuffer) { OMResponse omResponse = entry.getResponse().getOMResponse(); // New queue gets created in three conditions: // 1. It is first element in the response, - // 2. Current request is createSnapshot/purgeSnapshot request. - // 3. Previous request was createSnapshot/purgeSnapshot request. - if (response.isEmpty() || isStandaloneBatchCmdTypes(omResponse) - || isStandaloneBatchCmdTypes(previousOmResponse)) { + // 2. Current request is createSnapshot request. + // 3. Previous request was createSnapshot request. + if (response.isEmpty() || omResponse.hasCreateSnapshotResponse() + || (previousOmResponse != null && + previousOmResponse.hasCreateSnapshotResponse())) { response.add(new LinkedList<>()); } @@ -461,15 +458,6 @@ private synchronized List> splitReadyBufferAtCreateSnapshot() { return response; } - private static boolean isStandaloneBatchCmdTypes(OMResponse response) { - if (response == null) { - return false; - } - final OzoneManagerProtocolProtos.Type type = response.getCmdType(); - return type == OzoneManagerProtocolProtos.Type.SnapshotPurge - || type == OzoneManagerProtocolProtos.Type.CreateSnapshot; - } - private void addCleanupEntry(Entry entry, Map> cleanupEpochs) { Class responseClass = entry.getResponse().getClass(); @@ -624,7 +612,7 @@ int getCurrentBufferSize() { return currentBuffer.size(); } - synchronized int getReadyBufferSize() { + int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index af4d42ad68a..78d6ed89d2d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -301,23 +301,15 @@ private RaftClientRequest createRaftRequest(OMRequest omRequest) { } /** - * API used internally from OzoneManager Server when requests need to be submitted. + * API used internally from OzoneManager Server when requests needs to be + * submitted to ratis, where the crafted RaftClientRequest is passed along. 
* @param omRequest - * @param cliId - * @param callId + * @param raftClientRequest * @return OMResponse * @throws ServiceException */ - public OMResponse submitRequest(OMRequest omRequest, ClientId cliId, long callId) throws ServiceException { - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(cliId) - .setServerId(getRaftPeerId()) - .setGroupId(getRaftGroupId()) - .setCallId(callId) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); + public OMResponse submitRequest(OMRequest omRequest, + RaftClientRequest raftClientRequest) throws ServiceException { RaftClientReply raftClientReply = submitRequestToRatis(raftClientRequest); return createOmResponse(omRequest, raftClientReply); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 5a1612e021a..5dc640c742c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -19,7 +19,6 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; -import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import java.io.File; import java.nio.file.InvalidPathException; @@ -78,7 +77,6 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; -import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; @@ -100,7 +98,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.protocol.ClientId; import org.rocksdb.RocksDBException; import java.io.IOException; @@ -120,7 +117,6 @@ public final class OzoneManagerRatisUtils { private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerRatisUtils.class); - private static final RpcController NULL_RPC_CONTROLLER = null; private OzoneManagerRatisUtils() { } @@ -233,8 +229,6 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); - case SnapshotMoveTableKeys: - return new OMSnapshotMoveTableKeysRequest(omRequest); case SnapshotPurge: return new OMSnapshotPurgeRequest(omRequest); case SetSnapshotProperty: @@ -407,9 +401,9 @@ private static OMClientRequest getOMAclRequest(OMRequest omRequest, } /** - * Convert exception result to {@link org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status}. + * Convert exception result to {@link OzoneManagerProtocolProtos.Status}. 
* @param exception - * @return Status + * @return OzoneManagerProtocolProtos.Status */ public static Status exceptionToResponseStatus(Exception exception) { if (exception instanceof OMException) { @@ -508,13 +502,4 @@ public static GrpcTlsConfig createServerTlsConfig(SecurityConfig conf, return null; } - - public static OzoneManagerProtocolProtos.OMResponse submitRequest( - OzoneManager om, OMRequest omRequest, ClientId clientId, long callId) throws ServiceException { - if (om.isRatisEnabled()) { - return om.getOmRatisServer().submitRequest(omRequest, clientId, callId); - } else { - return om.getOmServerProtocol().submitRequest(NULL_RPC_CONTROLLER, omRequest); - } - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 17f9663ae1f..25a204ded27 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -438,6 +438,7 @@ public InetAddress getRemoteAddress() throws IOException { * Return String created from OMRequest userInfo. If userInfo is not * set, returns null. * @return String + * @throws IOException */ @VisibleForTesting public String getHostName() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index f73255da117..9ae6b7e5d50 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -45,6 +45,7 @@ public interface RequestAuditor { * @param auditMap * @param throwable * @param userInfo + * @return */ OMAuditLogger.Builder buildAuditMessage( AuditAction op, Map auditMap, Throwable throwable, UserInfo userInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 802cfa54e60..53d4c83c3a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -257,7 +257,7 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), * @param bucketInfo * @param omPathInfo * @param trxnLogIndex - * @return {@code List} + * @return * @throws IOException */ public static List getAllParentInfo(OzoneManager ozoneManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 8f2a768c525..3e7549b176e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -1050,7 +1050,7 @@ public static long getParentID(long volumeId, long bucketId, String keyName, * @param volumeName - volume name. * @param bucketName - bucket name. * @param keyName - key name. 
- * @return {@code long} + * @return * @throws IOException */ public static long getParentId(OMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 29ed5d9fc7b..2c182a6a5f5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -24,17 +24,13 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; - import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -47,10 +43,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -70,34 +64,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List purgeRequests = purgeDirsRequest.getDeletedPathList(); + + SnapshotInfo fromSnapshotInfo = null; Set> lockSet = new HashSet<>(); Map, OmBucketInfo> volBucketInfoMap = new HashMap<>(); - OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); Map openKeyInfoMap = new HashMap<>(); + OMMetrics omMetrics = ozoneManager.getMetrics(); - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( - getOmRequest()); - final SnapshotInfo fromSnapshotInfo; try { - fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, - fromSnapshot) : null; - // Checking if this request is an old request or new one. - if (purgeDirsRequest.hasExpectedPreviousSnapshotID()) { - // Validating previous snapshot since while purging deletes, a snapshot create request could make this purge - // directory request invalid on AOS since the deletedDirectory would be in the newly created snapshot. Adding - // subdirectories could lead to not being able to reclaim sub-files and subdirectories since the - // file/directory would be present in the newly created snapshot. - // Validating previous snapshot can ensure the chain hasn't changed. - UUID expectedPreviousSnapshotId = purgeDirsRequest.getExpectedPreviousSnapshotID().hasUuid() - ? 
fromProtobuf(purgeDirsRequest.getExpectedPreviousSnapshotID().getUuid()) : null; - validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), - expectedPreviousSnapshotId); + if (fromSnapshot != null) { + fromSnapshotInfo = ozoneManager.getMetadataManager() + .getSnapshotInfoTable() + .get(fromSnapshot); } - } catch (IOException e) { - LOG.error("Error occurred while performing OMDirectoriesPurge. ", e); - return new OMDirectoriesPurgeResponseWithFSO(createErrorOMResponse(omResponse, e)); - } - try { + for (OzoneManagerProtocolProtos.PurgePathRequest path : purgeRequests) { for (OzoneManagerProtocolProtos.KeyInfo key : path.getMarkDeletedSubDirsList()) { @@ -169,11 +150,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } } - if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); - } } catch (IOException ex) { // Case of IOException for fromProtobuf will not happen // as this is created and send within OM @@ -189,8 +165,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - return new OMDirectoriesPurgeResponseWithFSO( + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMClientResponse omClientResponse = new OMDirectoriesPurgeResponseWithFSO( omResponse.build(), purgeRequests, ozoneManager.isRatisEnabled(), getBucketLayout(), volBucketInfoMap, fromSnapshotInfo, openKeyInfoMap); + + return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 378e0cb12ce..b8bf89a3542 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -28,9 +28,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; @@ -96,10 +96,6 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = commitKeyRequest.getKeyArgs(); - if (keyArgs.hasExpectedDataGeneration()) { - ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); - } - // Verify key name final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, @@ -110,7 +106,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } boolean isHsync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); boolean isRecovery = commitKeyRequest.hasRecovery() && commitKeyRequest.getRecovery(); - boolean enableHsync = OzoneFSUtils.canEnableHsync(ozoneManager.getConfiguration(), false); + boolean enableHsync = ozoneManager.getConfiguration().getBoolean( + 
OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); // If hsynced is called for a file, then this file is hsynced, otherwise it's not hsynced. // Currently, file lease recovery by design only supports recover hsynced file @@ -458,6 +456,7 @@ protected List getOmKeyLocationInfos( * @param omMetrics om metrics * @param exception exception trace * @param omKeyInfo omKeyInfo + * @param result result * @param result stores the result of the execution */ @SuppressWarnings("parameternumber") @@ -554,7 +553,7 @@ public static OMRequest blockCommitKeyWithBucketLayoutFromOldClient( public static OMRequest disallowHsync( OMRequest req, ValidationContext ctx) throws OMException { if (!ctx.versionManager() - .isAllowed(OMLayoutFeature.HBASE_SUPPORT)) { + .isAllowed(OMLayoutFeature.HSYNC)) { CommitKeyRequest commitKeyRequest = req.getCommitKeyRequest(); boolean isHSync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index dee5bb0fe0e..d0ed0eacecd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -28,7 +28,6 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; @@ -94,10 +93,6 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = createKeyRequest.getKeyArgs(); - if (keyArgs.hasExpectedDataGeneration()) { - ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); - } - // Verify key name OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index f40adb7495f..b370c286e0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -246,7 +246,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. 
* @param omMetadataManager - * @return {@code String} + * @return * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index a5e8cb14525..9ed92183968 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -21,12 +21,6 @@ import java.io.IOException; import java.util.ArrayList; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -43,10 +37,6 @@ import org.slf4j.LoggerFactory; import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -63,60 +53,38 @@ public OMKeyPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); - List bucketDeletedKeysList = purgeKeysRequest.getDeletedKeysList(); - List keysToUpdateList = purgeKeysRequest.getKeysToUpdateList(); - String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? purgeKeysRequest.getSnapshotTableKey() : null; - OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + List bucketDeletedKeysList = purgeKeysRequest + .getDeletedKeysList(); + List keysToUpdateList = purgeKeysRequest + .getKeysToUpdateList(); + String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? + purgeKeysRequest.getSnapshotTableKey() : null; + List keysToBePurgedList = new ArrayList<>(); OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); - - - final SnapshotInfo fromSnapshotInfo; - try { - fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, - fromSnapshot) : null; - // Checking if this request is an old request or new one. - if (purgeKeysRequest.hasExpectedPreviousSnapshotID()) { - // Validating previous snapshot since while purging deletes, a snapshot create request could make this purge - // key request invalid on AOS since the deletedKey would be in the newly created snapshot. This would add an - // redundant tombstone entry in the deletedTable. It is better to skip the transaction. - UUID expectedPreviousSnapshotId = purgeKeysRequest.getExpectedPreviousSnapshotID().hasUuid() - ? fromProtobuf(purgeKeysRequest.getExpectedPreviousSnapshotID().getUuid()) : null; - validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), - expectedPreviousSnapshotId); - } - } catch (IOException e) { - LOG.error("Error occurred while performing OmKeyPurge. 
", e); - return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); - } - - List keysToBePurgedList = new ArrayList<>(); + OMClientResponse omClientResponse = null; for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - keysToBePurgedList.addAll(bucketWithDeleteKeys.getKeysList()); - } - - if (keysToBePurgedList.isEmpty()) { - return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, - new OMException("None of the keys can be purged be purged since a new snapshot was created for all the " + - "buckets, making this request invalid", OMException.ResultCodes.KEY_DELETION_ERROR))); + for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { + keysToBePurgedList.add(deletedKey); + } } - // Setting transaction info for snapshot, this is to prevent duplicate purge requests to OM from background - // services. try { - if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + SnapshotInfo fromSnapshotInfo = null; + if (fromSnapshot != null) { + fromSnapshotInfo = ozoneManager.getMetadataManager() + .getSnapshotInfoTable().get(fromSnapshot); } - } catch (IOException e) { - return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); + omClientResponse = new OMKeyPurgeResponse(omResponse.build(), + keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); + } catch (IOException ex) { + omClientResponse = new OMKeyPurgeResponse( + createErrorOMResponse(omResponse, ex)); } - return new OMKeyPurgeResponse(omResponse.build(), - keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); + return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index e57b6d99fd4..72365221d3b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -395,7 +395,7 @@ private Map buildAuditMap( * level, e.g. source is /vol1/buck1/dir1/key1 and dest is /vol1/buck1). * * @param request - * @return {@code String} + * @return * @throws OMException */ @Override @@ -410,7 +410,7 @@ protected String extractDstKey(RenameKeyRequest request) throws OMException { * Returns the validated and normalized source key name. * * @param keyArgs - * @return {@code String} + * @return * @throws OMException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 88c5ad91405..09e5d8bca06 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -611,7 +611,7 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, /** * Get FileEncryptionInfoProto from KeyArgs. 
* @param keyArgs - * @return FileEncryptionInfo + * @return */ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { FileEncryptionInfo encryptionInfo = null; @@ -623,7 +623,7 @@ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { /** * Check bucket quota in bytes. - * @param metadataManager + * @paran metadataManager * @param omBucketInfo * @param allocateSize * @throws IOException @@ -911,7 +911,7 @@ private OmKeyInfo prepareMultipartFileInfo( * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. * @param omMetadataManager - * @return {@code String} + * @return * @throws IOException */ protected String getDBMultipartOpenKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 2ded4f6a83e..3aa4151cea3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -167,7 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ((RDBStore) omMetadataManager.getStore()).getDb() .getLatestSequenceNumber(); snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber); - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + // Snapshot referenced size should be bucket's used bytes OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 18055bdda40..122108ad65f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -20,11 +20,9 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -62,6 +60,7 @@ public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + 
OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -79,26 +78,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); try { - // Check the snapshot exists. - SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); - - nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); + nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, + snapshotChainManager, omSnapshotManager); // Get next non-deleted snapshot. - List nextDBKeysList = moveDeletedKeysRequest.getNextDBKeysList(); - List reclaimKeysList = moveDeletedKeysRequest.getReclaimKeysList(); - List renamedKeysList = moveDeletedKeysRequest.getRenamedKeysList(); - List movedDirs = moveDeletedKeysRequest.getDeletedDirsToMoveList(); + List nextDBKeysList = + moveDeletedKeysRequest.getNextDBKeysList(); + List reclaimKeysList = + moveDeletedKeysRequest.getReclaimKeysList(); + List renamedKeysList = + moveDeletedKeysRequest.getRenamedKeysList(); + List movedDirs = + moveDeletedKeysRequest.getDeletedDirsToMoveList(); - // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. - fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshot)); - if (nextSnapshot != null) { - nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), nextSnapshot)); - } omClientResponse = new OMSnapshotMoveDeletedKeysResponse( omResponse.build(), fromSnapshot, nextSnapshot, nextDBKeysList, reclaimKeysList, renamedKeysList, movedDirs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java deleted file mode 100644 index 0eb0d3cd166..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om.request.snapshot; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveTableKeysResponse; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; -import org.apache.ratis.server.protocol.TermIndex; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; -import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; - -/** - * Handles OMSnapshotMoveTableKeysRequest Request. - * This is an OM internal request. Does not need @RequireSnapshotFeatureState. - */ -public class OMSnapshotMoveTableKeysRequest extends OMClientRequest { - - private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotMoveTableKeysRequest.class); - - public OMSnapshotMoveTableKeysRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); - SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); - SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); - SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, - snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); - String bucketKeyPrefix = omMetadataManager.getBucketKeyPrefix(fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName()); - String bucketKeyPrefixFSO = omMetadataManager.getBucketKeyPrefixFSO(fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName()); - - Set keys = new HashSet<>(); - List deletedKeys = new ArrayList<>(moveTableKeysRequest.getDeletedKeysList().size()); - - //validate deleted key starts with bucket prefix.[///] - for (SnapshotMoveKeyInfos deletedKey : moveTableKeysRequest.getDeletedKeysList()) { - // Filter only deleted keys with at least one keyInfo per key. 
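      // Illustration with hypothetical values (not part of the patch): for volume "vol1" and
      // bucket "buck1" the expected prefix is "/vol1/buck1/", so a deletedTable key such as
      // "/vol1/buck1/key1" passes the startsWith check below, "/vol2/buck1/key1" fails it with
      // INVALID_KEY_NAME, and a key that appears twice in the request fails with INVALID_REQUEST.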
- if (!deletedKey.getKeyInfosList().isEmpty()) { - deletedKeys.add(deletedKey); - if (!deletedKey.getKey().startsWith(bucketKeyPrefix)) { - throw new OMException("Deleted Key: " + deletedKey + " doesn't start with prefix " + bucketKeyPrefix, - OMException.ResultCodes.INVALID_KEY_NAME); - } - if (keys.contains(deletedKey.getKey())) { - throw new OMException("Duplicate Deleted Key: " + deletedKey + " in request", - OMException.ResultCodes.INVALID_REQUEST); - } else { - keys.add(deletedKey.getKey()); - } - } - } - - keys.clear(); - List renamedKeysList = new ArrayList<>(moveTableKeysRequest.getRenamedKeysList().size()); - //validate rename key starts with bucket prefix.[///] - for (HddsProtos.KeyValue renamedKey : moveTableKeysRequest.getRenamedKeysList()) { - if (renamedKey.hasKey() && renamedKey.hasValue()) { - renamedKeysList.add(renamedKey); - if (!renamedKey.getKey().startsWith(bucketKeyPrefix)) { - throw new OMException("Rename Key: " + renamedKey + " doesn't start with prefix " + bucketKeyPrefix, - OMException.ResultCodes.INVALID_KEY_NAME); - } - if (keys.contains(renamedKey.getKey())) { - throw new OMException("Duplicate rename Key: " + renamedKey + " in request", - OMException.ResultCodes.INVALID_REQUEST); - } else { - keys.add(renamedKey.getKey()); - } - } - } - keys.clear(); - - // Filter only deleted dirs with only one keyInfo per key. - List deletedDirs = new ArrayList<>(moveTableKeysRequest.getDeletedDirsList().size()); - //validate deleted key starts with bucket FSO path prefix.[///] - for (SnapshotMoveKeyInfos deletedDir : moveTableKeysRequest.getDeletedDirsList()) { - // Filter deleted directories with exactly one keyInfo per key. - if (deletedDir.getKeyInfosList().size() == 1) { - deletedDirs.add(deletedDir); - if (!deletedDir.getKey().startsWith(bucketKeyPrefixFSO)) { - throw new OMException("Deleted dir: " + deletedDir + " doesn't start with prefix " + - bucketKeyPrefixFSO, OMException.ResultCodes.INVALID_KEY_NAME); - } - if (keys.contains(deletedDir.getKey())) { - throw new OMException("Duplicate deleted dir Key: " + deletedDir + " in request", - OMException.ResultCodes.INVALID_REQUEST); - } else { - keys.add(deletedDir.getKey()); - } - } - } - return getOmRequest().toBuilder().setSnapshotMoveTableKeysRequest( - moveTableKeysRequest.toBuilder().clearDeletedDirs().clearDeletedKeys().clearRenamedKeys() - .addAllDeletedKeys(deletedKeys).addAllDeletedDirs(deletedDirs) - .addAllRenamedKeys(renamedKeysList).build()).build(); - } - - @Override - @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); - SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); - - SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); - - OMClientResponse omClientResponse; - OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); - try { - SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, - snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); - // If there is no snapshot in the chain after the current snapshot move the keys to Active Object Store. - SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); - - // If next snapshot is not active then ignore move. 
Since this could be a redundant operations. - if (nextSnapshot != null && nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { - throw new OMException("Next snapshot : " + nextSnapshot + " in chain is not active.", - OMException.ResultCodes.INVALID_SNAPSHOT_ERROR); - } - - // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. - fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshot)); - if (nextSnapshot != null) { - nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), nextSnapshot)); - } - omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot, - moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(), - moveTableKeysRequest.getRenamedKeysList()); - } catch (IOException ex) { - omClientResponse = new OMSnapshotMoveTableKeysResponse(createErrorOMResponse(omResponse, ex)); - } - return omClientResponse; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 38c51d4de5c..2a9cfa6baf0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,13 +19,12 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -55,13 +54,6 @@ public class OMSnapshotPurgeRequest extends OMClientRequest { private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); - /** - * This map contains up to date snapshotInfo and works as a local cache for OMSnapshotPurgeRequest. - * Since purge and other updates happen in sequence inside validateAndUpdateCache, we can get updated snapshotInfo - * from this map rather than getting form snapshotInfoTable which creates a deep copy for every get call. 
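The helper this removed comment refers to, getUpdatedSnapshotInfo, is dropped further down in the same file; restated as a minimal get-or-load sketch (names exactly as in the removed code, enclosing class and imports assumed):

  private SnapshotInfo getUpdatedSnapshotInfo(String snapshotTableKey,
      OMMetadataManager omMetadataManager) throws IOException {
    SnapshotInfo snapshotInfo = updatedSnapshotInfos.get(snapshotTableKey);
    if (snapshotInfo == null) {
      // A table get materializes a fresh copy, so cache the first lookup and reuse it.
      snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey);
      updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo);
    }
    return snapshotInfo;
  }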
- */ - private final Map updatedSnapshotInfos = new HashMap<>(); - public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @@ -72,6 +64,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn final long trxnLogIndex = termIndex.getIndex(); + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -87,6 +80,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); + Map updatedSnapInfos = new HashMap<>(); + Map updatedPathPreviousAndGlobalSnapshots = + new HashMap<>(); // Each snapshot purge operation does three things: // 1. Update the deep clean flag for the next active snapshot (So that it can be @@ -96,36 +92,37 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // There is no need to take lock for snapshot purge as of now. We can simply rely on OMStateMachine // because it executes transaction sequentially. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = getUpdatedSnapshotInfo(snapTableKey, omMetadataManager); + SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); if (fromSnapshot == null) { // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + "Snapshot purge request.", snapTableKey); continue; } - SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); + + SnapshotInfo nextSnapshot = + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); // Step 1: Update the deep clean flag for the next active snapshot - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); // Step 2: Update the snapshot chain. - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex); - // Step 3: Purge the snapshot from SnapshotInfoTable cache and also remove from the map. + updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, + updatedPathPreviousAndGlobalSnapshots); + // Remove and close snapshot's RocksDB instance from SnapshotCache. + omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); + // Step 3: Purge the snapshot from SnapshotInfoTable cache. omMetadataManager.getSnapshotInfoTable() .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); - updatedSnapshotInfos.remove(fromSnapshot.getTableKey()); - } - // Update the snapshotInfo lastTransactionInfo. 
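The removed loop below applied a pattern that recurs throughout this revert (it is also dropped from the key purge, directory purge and move-deleted-keys requests): stamp the SnapshotInfo with the Ratis term and index, then publish it to the SnapshotInfo table cache. Condensed into a hypothetical helper for reference, using only calls visible in the removed hunks:

  private static void stampLastTransaction(OmMetadataManagerImpl omMetadataManager,
      SnapshotInfo snapshotInfo, TermIndex termIndex) {
    // Record the transaction that last touched this snapshot, so duplicate purge/move
    // requests from the background services can be detected.
    snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString());
    omMetadataManager.getSnapshotInfoTable().addCacheEntry(
        new CacheKey<>(snapshotInfo.getTableKey()),
        CacheValue.get(termIndex.getIndex(), snapshotInfo));
  }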
- for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), snapshotInfo)); } - omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); + omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), + snapshotDbKeys, updatedSnapInfos, + updatedPathPreviousAndGlobalSnapshots); omMetrics.incNumSnapshotPurges(); - LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.", - snapshotPurgeRequest, updatedSnapshotInfos); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + + "snapshots: {} and global and previous for snapshots:{}.", + snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); @@ -136,8 +133,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn return omClientResponse; } - private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, - long trxnLogIndex) throws IOException { + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, + OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, + Map updatedSnapInfos) throws IOException { if (snapInfo != null) { // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially @@ -147,7 +145,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManager // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapInfo.getTableKey()), CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapshotInfos.put(snapInfo.getTableKey(), snapInfo); + updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); } } @@ -160,7 +158,8 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManager private void updateSnapshotChainAndCache( OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, - long trxnLogIndex + long trxnLogIndex, + Map updatedPathPreviousAndGlobalSnapshots ) throws IOException { if (snapInfo == null) { return; @@ -199,36 +198,43 @@ private void updateSnapshotChainAndCache( } SnapshotInfo nextPathSnapInfo = - nextPathSnapshotKey != null ? getUpdatedSnapshotInfo(nextPathSnapshotKey, metadataManager) : null; + nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + // Updates next path snapshot's previous snapshot ID if (nextPathSnapInfo != null) { nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextPathSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); } - SnapshotInfo nextGlobalSnapInfo = - nextGlobalSnapshotKey != null ? 
getUpdatedSnapshotInfo(nextGlobalSnapshotKey, metadataManager) : null; - - if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + // Updates next global snapshot's previous snapshot ID + // If both next global and path snapshot are same, it may overwrite + // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check + // will prevent it. + if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && + nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { + nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } else if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId( + snapInfo.getGlobalPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextGlobalSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); } - - private SnapshotInfo getUpdatedSnapshotInfo(String snapshotTableKey, OMMetadataManager omMetadataManager) - throws IOException { - SnapshotInfo snapshotInfo = updatedSnapshotInfos.get(snapshotTableKey); - - if (snapshotInfo == null) { - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); - updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo); - } - return snapshotInfo; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java index df74edfb1c8..904b082e2d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java @@ -129,6 +129,7 @@ protected static PersistedUserVolumeInfo addVolumeToOwnerList( * @param dbVolumeKey * @param dbUserKey * @param transactionLogIndex + * @throws IOException */ protected static void createVolume( final OMMetadataManager omMetadataManager, OmVolumeArgs omVolumeArgs, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 782063d3244..edb13f8cf98 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -48,13 +48,12 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. 
*/ @CleanupTableInfo(cleanupTables = {DELETED_TABLE, DELETED_DIR_TABLE, - DIRECTORY_TABLE, FILE_TABLE, SNAPSHOT_INFO_TABLE}) + DIRECTORY_TABLE, FILE_TABLE}) public class OMDirectoriesPurgeResponseWithFSO extends OmKeyResponse { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoriesPurgeResponseWithFSO.class); @@ -78,10 +77,6 @@ public OMDirectoriesPurgeResponseWithFSO(@Nonnull OMResponse omResponse, this.openKeyInfoMap = openKeyInfoMap; } - public OMDirectoriesPurgeResponseWithFSO(OMResponse omResponse) { - super(omResponse); - } - @Override public void addToDBBatch(OMMetadataManager metadataManager, BatchOperation batchOp) throws IOException { @@ -91,7 +86,10 @@ public void addToDBBatch(OMMetadataManager metadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.getSnapshot(fromSnapshotInfo.getSnapshotId())) { + rcFromSnapshotInfo = omSnapshotManager.getSnapshot( + fromSnapshotInfo.getVolumeName(), + fromSnapshotInfo.getBucketName(), + fromSnapshotInfo.getName())) { OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); @@ -102,7 +100,6 @@ public void addToDBBatch(OMMetadataManager metadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } - metadataManager.getSnapshotInfoTable().putWithBatch(batchOp, fromSnapshotInfo.getTableKey(), fromSnapshotInfo); } else { processPaths(metadataManager, batchOp); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index cd2f7d190f4..b16ba95d78f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -39,13 +39,12 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** * Response for {@link OMKeyPurgeRequest} request. */ -@CleanupTableInfo(cleanupTables = {DELETED_TABLE, SNAPSHOT_INFO_TABLE}) +@CleanupTableInfo(cleanupTables = {DELETED_TABLE}) public class OMKeyPurgeResponse extends OmKeyResponse { private List purgeKeyList; private SnapshotInfo fromSnapshot; @@ -76,13 +75,18 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, if (fromSnapshot != null) { OmSnapshotManager omSnapshotManager = - ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); + ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { + omSnapshotManager.getSnapshot( + fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName(), + fromSnapshot.getName())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); - DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); + DBStore fromSnapshotStore = + fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. 
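Both purge responses in this area follow the same write path once a snapshot is involved: obtain the snapshot through the reference-counted cache (after this revert, looked up by volume, bucket and snapshot name instead of snapshot ID), then batch the deletes against that snapshot's own store. A condensed sketch, assuming the elided generic parameter is OmSnapshot and with processKeys standing in for the concrete table mutations:

  try (ReferenceCounted<OmSnapshot> rcOmFromSnapshot =
           omSnapshotManager.getSnapshot(
               fromSnapshot.getVolumeName(),
               fromSnapshot.getBucketName(),
               fromSnapshot.getName())) {
    OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get();
    DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore();
    try (BatchOperation writeBatch = fromSnapshotStore.initBatchOperation()) {
      // Queue the purge mutations on the snapshot's tables, then apply them atomically.
      processKeys(writeBatch, fromOmSnapshot.getMetadataManager());
      fromSnapshotStore.commitBatchOperation(writeBatch);
    }
  }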
try (BatchOperation writeBatch = fromSnapshotStore.initBatchOperation()) { @@ -91,7 +95,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); } else { processKeys(batchOperation, omMetadataManager); processKeysToUpdate(batchOperation, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 7d1b7f237b2..3726faacfd7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -40,7 +40,6 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -92,13 +91,19 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { + omSnapshotManager.getSnapshot( + fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName(), + fromSnapshot.getName())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { + rcOmNextSnapshot = omSnapshotManager.getSnapshot( + nextSnapshot.getVolumeName(), + nextSnapshot.getBucketName(), + nextSnapshot.getName())) { OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = @@ -134,11 +139,6 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, } } - // Flush snapshot info to rocksDB. - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); - if (nextSnapshot != null) { - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); - } } private void deleteDirsFromSnapshot(BatchOperation batchOp, @@ -200,7 +200,8 @@ private void processKeys(BatchOperation batchOp, } for (SnapshotMoveKeyInfos dBKey : nextDBKeysList) { - RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(dBKey, metadataManager); + RepeatedOmKeyInfo omKeyInfos = + createRepeatedOmKeyInfo(dBKey, metadataManager); if (omKeyInfos == null) { continue; } @@ -223,5 +224,36 @@ public static RepeatedOmKeyInfo createRepeatedOmKeyInfo( return result; } + + private RepeatedOmKeyInfo createRepeatedOmKeyInfo( + SnapshotMoveKeyInfos snapshotMoveKeyInfos, + OMMetadataManager metadataManager) throws IOException { + String dbKey = snapshotMoveKeyInfos.getKey(); + List keyInfoList = snapshotMoveKeyInfos.getKeyInfosList(); + // When older version of keys are moved to the next snapshot's deletedTable + // The newer version might also be in the next snapshot's deletedTable and + // it might overwrite. This is to avoid that and also avoid having + // orphans blocks. 
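+    // Hypothetical illustration of the comment above (not part of the patch): if the next
+    // snapshot's deletedTable already holds the newer deleted version of key "k1" and the older
+    // version is now being moved out of the purged snapshot, the merge below appends the older
+    // version to the existing RepeatedOmKeyInfo instead of replacing it, so the newer version's
+    // block information survives and its blocks are not left orphaned.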
+ RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); + + for (KeyInfo keyInfo : keyInfoList) { + OmKeyInfo omKeyInfo = OmKeyInfo.getFromProtobuf(keyInfo); + if (result == null) { + result = new RepeatedOmKeyInfo(omKeyInfo); + } else if (!isSameAsLatestOmKeyInfo(omKeyInfo, result)) { + result.addOmKeyInfo(omKeyInfo); + } + } + + return result; + } + + private boolean isSameAsLatestOmKeyInfo(OmKeyInfo omKeyInfo, + RepeatedOmKeyInfo result) { + int size = result.getOmKeyInfoList().size(); + assert size > 0; + OmKeyInfo keyInfoFromRepeated = result.getOmKeyInfoList().get(size - 1); + return omKeyInfo.equals(keyInfoFromRepeated); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java deleted file mode 100644 index b06570afb14..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om.response.snapshot; - -import jakarta.annotation.Nonnull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.response.CleanupTableInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; - -import java.io.IOException; -import java.util.List; - -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; - -/** - * Response for OMSnapshotMoveDeletedKeysRequest. 
- */ -@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) -public class OMSnapshotMoveTableKeysResponse extends OMClientResponse { - - private SnapshotInfo fromSnapshot; - private SnapshotInfo nextSnapshot; - private List deletedKeys; - private List renameKeysList; - private List deletedDirs; - - public OMSnapshotMoveTableKeysResponse(OMResponse omResponse, - @Nonnull SnapshotInfo fromSnapshot, SnapshotInfo nextSnapshot, - List deletedKeys, - List deletedDirs, - List renamedKeys) { - super(omResponse); - this.fromSnapshot = fromSnapshot; - this.nextSnapshot = nextSnapshot; - this.deletedKeys = deletedKeys; - this.renameKeysList = renamedKeys; - this.deletedDirs = deletedDirs; - } - - /** - * For when the request is not successful. - * For a successful request, the other constructor should be used. - */ - public OMSnapshotMoveTableKeysResponse(@Nonnull OMResponse omResponse) { - super(omResponse); - checkStatusNotOK(); - } - - @Override - protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager) - .getOzoneManager().getOmSnapshotManager(); - - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { - - OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); - - if (nextSnapshot != null) { - try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { - - OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); - RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); - // Init Batch Operation for snapshot db. - try (BatchOperation writeBatch = nextSnapshotStore.initBatchOperation()) { - addKeysToNextSnapshot(writeBatch, nextOmSnapshot.getMetadataManager()); - nextSnapshotStore.commitBatchOperation(writeBatch); - nextSnapshotStore.getDb().flushWal(true); - nextSnapshotStore.getDb().flush(); - } - } - } else { - // Handle the case where there is no next Snapshot. - addKeysToNextSnapshot(batchOperation, omMetadataManager); - } - - // Update From Snapshot Deleted Table. - RDBStore fromSnapshotStore = (RDBStore) fromOmSnapshot.getMetadataManager().getStore(); - try (BatchOperation fromSnapshotBatchOp = fromSnapshotStore.initBatchOperation()) { - deleteKeysFromSnapshot(fromSnapshotBatchOp, fromOmSnapshot.getMetadataManager()); - fromSnapshotStore.commitBatchOperation(fromSnapshotBatchOp); - fromSnapshotStore.getDb().flushWal(true); - fromSnapshotStore.getDb().flush(); - } - } - - // Flush snapshot info to rocksDB. - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); - if (nextSnapshot != null) { - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); - } - } - - private void deleteKeysFromSnapshot(BatchOperation batchOp, OMMetadataManager fromSnapshotMetadataManager) - throws IOException { - for (SnapshotMoveKeyInfos deletedOmKeyInfo : deletedKeys) { - // Delete keys from current snapshot that are moved to next snapshot. - fromSnapshotMetadataManager.getDeletedTable().deleteWithBatch(batchOp, deletedOmKeyInfo.getKey()); - } - - // Delete rename keys from current snapshot that are moved to next snapshot. 
- for (HddsProtos.KeyValue renameEntry : renameKeysList) { - fromSnapshotMetadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, renameEntry.getKey()); - } - - // Delete deletedDir from current snapshot that are moved to next snapshot. - for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { - fromSnapshotMetadataManager.getDeletedDirTable().deleteWithBatch(batchOp, deletedDirInfo.getKey()); - } - - } - - private void addKeysToNextSnapshot(BatchOperation batchOp, OMMetadataManager metadataManager) throws IOException { - - // Add renamed keys to the next snapshot or active DB. - for (HddsProtos.KeyValue renameEntry : renameKeysList) { - metadataManager.getSnapshotRenamedTable().putWithBatch(batchOp, renameEntry.getKey(), renameEntry.getValue()); - } - // Add deleted keys to the next snapshot or active DB. - for (SnapshotMoveKeyInfos deletedKeyInfo : deletedKeys) { - RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(deletedKeyInfo, - metadataManager); - metadataManager.getDeletedTable().putWithBatch(batchOp, deletedKeyInfo.getKey(), omKeyInfos); - } - // Add deleted dir keys to the next snapshot or active DB. - for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { - metadataManager.getDeletedDirTable().putWithBatch(batchOp, deletedDirInfo.getKey(), - OmKeyInfo.getFromProtobuf(deletedDirInfo.getKeyInfosList().get(0))); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 81a020653f7..ea9e68cc9ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -49,15 +49,18 @@ public class OMSnapshotPurgeResponse extends OMClientResponse { LoggerFactory.getLogger(OMSnapshotPurgeResponse.class); private final List snapshotDbKeys; private final Map updatedSnapInfos; + private final Map updatedPreviousAndGlobalSnapInfos; public OMSnapshotPurgeResponse( @Nonnull OMResponse omResponse, @Nonnull List snapshotDbKeys, - Map updatedSnapInfos + Map updatedSnapInfos, + Map updatedPreviousAndGlobalSnapInfos ) { super(omResponse); this.snapshotDbKeys = snapshotDbKeys; this.updatedSnapInfos = updatedSnapInfos; + this.updatedPreviousAndGlobalSnapInfos = updatedPreviousAndGlobalSnapInfos; } /** @@ -69,6 +72,7 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); this.snapshotDbKeys = null; this.updatedSnapInfos = null; + this.updatedPreviousAndGlobalSnapInfos = null; } @Override @@ -78,6 +82,8 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); + updateSnapInfo(metadataManager, batchOperation, + updatedPreviousAndGlobalSnapInfos); for (String dbKey: snapshotDbKeys) { // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager @@ -90,15 +96,8 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, continue; } - // Remove and close snapshot's RocksDB instance from SnapshotCache. 
- ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager() - .invalidateCacheEntry(snapshotInfo.getSnapshotId()); - // Remove the snapshot from snapshotId to snapshotTableKey map. - ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() - .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); - // Delete snapshotInfo from the table. omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index d5da77ca0aa..429e286287c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -19,7 +19,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -32,12 +31,14 @@ import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -47,6 +48,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.util.Preconditions; import java.io.IOException; @@ -72,7 +75,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService private final OzoneManager ozoneManager; private final ScmBlockLocationProtocol scmClient; - private final ClientId clientId = ClientId.randomId(); + private static ClientId clientId = ClientId.randomId(); private final AtomicLong deletedDirsCount; private final AtomicLong movedDirsCount; private final AtomicLong movedFilesCount; @@ -96,7 +99,7 @@ public AbstractKeyDeletingService(String serviceName, long interval, protected int processKeyDeletes(List keyBlocksList, KeyManager manager, HashMap keysToModify, - String snapTableKey, UUID expectedPreviousSnapshotId) throws IOException { + String snapTableKey) throws IOException { long startTime = Time.monotonicNow(); int delCount = 0; @@ -119,7 +122,7 @@ protected int 
processKeyDeletes(List keyBlocksList, startTime = Time.monotonicNow(); if (isRatisEnabled()) { delCount = submitPurgeKeysRequest(blockDeletionResults, - keysToModify, snapTableKey, expectedPreviousSnapshotId); + keysToModify, snapTableKey); } else { // TODO: Once HA and non-HA paths are merged, we should have // only one code path here. Purge keys should go through an @@ -171,7 +174,7 @@ private int deleteAllKeys(List results, * @param keysToModify Updated list of RepeatedOmKeyInfo */ private int submitPurgeKeysRequest(List results, - HashMap keysToModify, String snapTableKey, UUID expectedPreviousSnapshotId) { + HashMap keysToModify, String snapTableKey) { Map, List> purgeKeysMapPerBucket = new HashMap<>(); @@ -202,12 +205,6 @@ private int submitPurgeKeysRequest(List results, if (snapTableKey != null) { purgeKeysRequest.setSnapshotTableKey(snapTableKey); } - OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = - OzoneManagerProtocolProtos.NullableUUID.newBuilder(); - if (expectedPreviousSnapshotId != null) { - expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); - } - purgeKeysRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); // Add keys to PurgeKeysRequest bucket wise. for (Map.Entry, List> entry : @@ -250,7 +247,10 @@ private int submitPurgeKeysRequest(List results, // Submit PurgeKeys request to OM try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + RaftClientRequest raftClientRequest = + createRaftClientRequestForPurge(omRequest); + ozoneManager.getOmRatisServer().submitRequest(omRequest, + raftClientRequest); } catch (ServiceException e) { LOG.error("PurgeKey request failed. Will retry at next run."); return 0; @@ -259,6 +259,20 @@ private int submitPurgeKeysRequest(List results, return deletedCount; } + protected RaftClientRequest createRaftClientRequestForPurge( + OMRequest omRequest) { + return RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) + .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) + .setCallId(runCount.get()) + .setMessage( + Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + } + /** * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. 
@@ -279,21 +293,13 @@ private void addToMap(Map, List> map, String object } protected void submitPurgePaths(List requests, - String snapTableKey, - UUID expectedPreviousSnapshotId) { + String snapTableKey) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); if (snapTableKey != null) { purgeDirRequest.setSnapshotTableKey(snapTableKey); } - OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = - OzoneManagerProtocolProtos.NullableUUID.newBuilder(); - if (expectedPreviousSnapshotId != null) { - expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); - } - purgeDirRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); - purgeDirRequest.addAllDeletedPath(requests); OzoneManagerProtocolProtos.OMRequest omRequest = @@ -305,7 +311,15 @@ protected void submitPurgePaths(List requests, // Submit Purge paths request to OM try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + if (isRatisEnabled()) { + RaftClientRequest raftClientRequest = + createRaftClientRequestForPurge(omRequest); + ozoneManager.getOmRatisServer().submitRequest(omRequest, + raftClientRequest); + } else { + getOzoneManager().getOmServerProtocol() + .submitRequest(null, omRequest); + } } catch (ServiceException e) { LOG.error("PurgePaths request failed. Will retry at next run."); } @@ -399,8 +413,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, List> allSubDirList, List purgePathRequestList, String snapTableKey, long startTime, - int remainingBufLimit, KeyManager keyManager, - UUID expectedPreviousSnapshotId) { + int remainingBufLimit, KeyManager keyManager) { // Optimization to handle delete sub-dir and keys to remove quickly // This case will be useful to handle when depth of directory is high @@ -440,7 +453,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } if (!purgePathRequestList.isEmpty()) { - submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId); + submitPurgePaths(purgePathRequestList, snapTableKey); } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { @@ -563,6 +576,26 @@ protected boolean isBufferLimitCrossed( return cLimit + increment >= maxLimit; } + protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, + SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) + throws IOException { + SnapshotInfo currSnapInfo = snapInfo; + while (chainManager.hasPreviousPathSnapshot( + currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId())) { + + UUID prevPathSnapshot = chainManager.previousPathSnapshot( + currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId()); + String tableKey = chainManager.getTableKey(prevPathSnapshot); + SnapshotInfo prevSnapInfo = omSnapshotManager.getSnapshotInfo(tableKey); + if (prevSnapInfo.getSnapshotStatus() == + SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + return prevSnapInfo; + } + currSnapInfo = prevSnapInfo; + } + return null; + } + protected boolean isKeyReclaimable( Table previousKeyTable, Table renamedTable, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index b3000515998..c8703c3c4c6 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -33,20 +33,16 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; +import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -74,6 +70,8 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { public static final Logger LOG = LoggerFactory.getLogger(DirectoryDeletingService.class); + private static ClientId clientId = ClientId.randomId(); + // Use only a single thread for DirDeletion. Multiple threads would read // or write to same tables and can send deletion requests for same key // multiple times. @@ -84,7 +82,6 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { private final long pathLimitPerTask; private final int ratisByteLimit; private final AtomicBoolean suspended; - private AtomicBoolean isRunningOnAOS; public DirectoryDeletingService(long interval, TimeUnit unit, long serviceTimeout, OzoneManager ozoneManager, @@ -101,7 +98,6 @@ public DirectoryDeletingService(long interval, TimeUnit unit, // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); this.suspended = new AtomicBoolean(false); - this.isRunningOnAOS = new AtomicBoolean(false); } private boolean shouldRun() { @@ -112,10 +108,6 @@ private boolean shouldRun() { return getOzoneManager().isLeaderReady() && !suspended.get(); } - public boolean isRunningOnAOS() { - return isRunningOnAOS.get(); - } - /** * Suspend the service. */ @@ -135,16 +127,11 @@ public void resume() { @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DirectoryDeletingService.DirDeletingTask(this)); + queue.add(new DirectoryDeletingService.DirDeletingTask()); return queue; } - private final class DirDeletingTask implements BackgroundTask { - private final DirectoryDeletingService directoryDeletingService; - - private DirDeletingTask(DirectoryDeletingService service) { - this.directoryDeletingService = service; - } + private class DirDeletingTask implements BackgroundTask { @Override public int getPriority() { @@ -157,7 +144,6 @@ public BackgroundTaskResult call() { if (LOG.isDebugEnabled()) { LOG.debug("Running DirectoryDeletingService"); } - isRunningOnAOS.set(true); getRunCount().incrementAndGet(); long dirNum = 0L; long subDirNum = 0L; @@ -169,15 +155,9 @@ public BackgroundTaskResult call() { = new ArrayList<>((int) remainNum); Table.KeyValue pendingDeletedDirInfo; - try (TableIterator> deleteTableIterator = getOzoneManager().getMetadataManager(). getDeletedDirTable().iterator()) { - // This is to avoid race condition b/w purge request and snapshot chain updation. 
For AOS taking the global - // snapshotId since AOS could process multiple buckets in one iteration. - UUID expectedPreviousSnapshotId = - ((OmMetadataManagerImpl)getOzoneManager().getMetadataManager()).getSnapshotChainManager() - .getLatestGlobalSnapshotId(); long startTime = Time.monotonicNow(); while (remainNum > 0 && deleteTableIterator.hasNext()) { @@ -224,17 +204,14 @@ public BackgroundTaskResult call() { remainNum, dirNum, subDirNum, subFileNum, allSubDirList, purgePathRequestList, null, startTime, ratisByteLimit - consumedSize, - getOzoneManager().getKeyManager(), expectedPreviousSnapshotId); + getOzoneManager().getKeyManager()); } catch (IOException e) { LOG.error("Error while running delete directories and files " + "background task. Will retry at next run.", e); } - isRunningOnAOS.set(false); - synchronized (directoryDeletingService) { - this.directoryDeletingService.notify(); - } } + // place holder by returning empty results of this call back. return BackgroundTaskResult.EmptyTaskResult.newResult(); } @@ -247,23 +224,12 @@ private boolean previousSnapshotHasDir( getOzoneManager().getOmSnapshotManager(); OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - SnapshotInfo previousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), - deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); - if (previousSnapshotInfo == null) { - return false; - } - // previous snapshot is not active or it has not been flushed to disk then don't process the key in this - // iteration. - if (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), - previousSnapshotInfo)) { - return true; - } + try (ReferenceCounted rcLatestSnapshot = - omSnapshotManager.getSnapshot( + metadataManager.getLatestActiveSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - previousSnapshotInfo.getName())) { + omSnapshotManager)) { if (rcLatestSnapshot != null) { String dbRenameKey = metadataManager @@ -284,14 +250,8 @@ private boolean previousSnapshotHasDir( String prevDbKey = prevDirTableDBKey == null ? metadataManager.getOzoneDeletePathDirKey(key) : prevDirTableDBKey; OmDirectoryInfo prevDirInfo = prevDirTable.get(prevDbKey); - //Checking if the previous snapshot in the chain hasn't changed while checking if the deleted directory is - // present in the previous snapshot. If the chain has changed, the deleted directory could have been moved - // to the newly created snapshot. 
- SnapshotInfo newPreviousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), - deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); - return (!Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), - Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) || (prevDirInfo != null && - prevDirInfo.getObjectID() == deletedDirInfo.getObjectID()); + return prevDirInfo != null && + prevDirInfo.getObjectID() == deletedDirInfo.getObjectID(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 9a4f74eba59..c4285482872 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -23,9 +23,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -44,9 +42,9 @@ import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -69,6 +67,8 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,15 +95,11 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; private final Map snapshotSeekMap; - private AtomicBoolean isRunningOnAOS; - private final boolean deepCleanSnapshots; - private final SnapshotChainManager snapshotChainManager; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, KeyManager manager, long serviceInterval, - long serviceTimeout, ConfigurationSource conf, - boolean deepCleanSnapshots) { + long serviceTimeout, ConfigurationSource conf) { super(KeyDeletingService.class.getSimpleName(), serviceInterval, TimeUnit.MILLISECONDS, KEY_DELETING_CORE_POOL_SIZE, serviceTimeout, ozoneManager, scmClient); @@ -118,9 +114,6 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); this.snapshotSeekMap = new HashMap<>(); - this.isRunningOnAOS = new AtomicBoolean(false); - this.deepCleanSnapshots = deepCleanSnapshots; - this.snapshotChainManager = 
((OmMetadataManagerImpl)manager.getMetadataManager()).getSnapshotChainManager(); } /** @@ -133,14 +126,10 @@ public AtomicLong getDeletedKeyCount() { return deletedKeyCount; } - public boolean isRunningOnAOS() { - return isRunningOnAOS.get(); - } - @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new KeyDeletingTask(this)); + queue.add(new KeyDeletingTask()); return queue; } @@ -183,12 +172,7 @@ public void setKeyLimitPerTask(int keyLimitPerTask) { * the blocks info in its deletedBlockLog), it removes these keys from the * DB. */ - private final class KeyDeletingTask implements BackgroundTask { - private final KeyDeletingService deletingService; - - private KeyDeletingTask(KeyDeletingService service) { - this.deletingService = service; - } + private class KeyDeletingTask implements BackgroundTask { @Override public int getPriority() { @@ -202,7 +186,7 @@ public BackgroundTaskResult call() { if (shouldRun()) { final long run = getRunCount().incrementAndGet(); LOG.debug("Running KeyDeletingService {}", run); - isRunningOnAOS.set(true); + int delCount = 0; try { // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in @@ -210,9 +194,7 @@ public BackgroundTaskResult call() { // doesn't have enough entries left. // OM would have to keep track of which snapshot the key is coming // from if the above would be done inside getPendingDeletionKeys(). - // This is to avoid race condition b/w purge request and snapshot chain update. For AOS taking the global - // snapshotId since AOS could process multiple buckets in one iteration. - UUID expectedPreviousSnapshotId = snapshotChainManager.getLatestGlobalSnapshotId(); + PendingKeysDeletion pendingKeysDeletion = manager .getPendingDeletionKeys(getKeyLimitPerTask()); List keyBlocksList = pendingKeysDeletion @@ -220,7 +202,7 @@ public BackgroundTaskResult call() { if (keyBlocksList != null && !keyBlocksList.isEmpty()) { delCount = processKeyDeletes(keyBlocksList, getOzoneManager().getKeyManager(), - pendingKeysDeletion.getKeysToModify(), null, expectedPreviousSnapshotId); + pendingKeysDeletion.getKeysToModify(), null); deletedKeyCount.addAndGet(delCount); } } catch (IOException e) { @@ -229,7 +211,7 @@ public BackgroundTaskResult call() { } try { - if (deepCleanSnapshots && delCount < keyLimitPerTask) { + if (delCount < keyLimitPerTask) { processSnapshotDeepClean(delCount); } } catch (Exception e) { @@ -238,11 +220,6 @@ public BackgroundTaskResult call() { } } - isRunningOnAOS.set(false); - synchronized (deletingService) { - this.deletingService.notify(); - } - // By design, no one cares about the results of this call back. return EmptyTaskResult.newResult(); } @@ -265,23 +242,15 @@ private void processSnapshotDeepClean(int delCount) while (delCount < keyLimitPerTask && iterator.hasNext()) { List keysToPurge = new ArrayList<>(); HashMap keysToModify = new HashMap<>(); - SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); + SnapshotInfo currSnapInfo = iterator.next().getValue(); + // Deep clean only on active snapshot. Deleted Snapshots will be // cleaned up by SnapshotDeletingService. 
- if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepClean()) { continue; } - SnapshotInfo prevSnapInfo = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - currSnapInfo); - if (prevSnapInfo != null && - (prevSnapInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), - prevSnapInfo))) { - continue; - } - try (ReferenceCounted rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), @@ -310,13 +279,13 @@ private void processSnapshotDeepClean(int delCount) } String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - SnapshotInfo previousSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - currSnapInfo); + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( + currSnapInfo, snapChainManager, omSnapshotManager); SnapshotInfo previousToPrevSnapshot = null; if (previousSnapshot != null) { - previousToPrevSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - previousSnapshot); + previousToPrevSnapshot = getPreviousActiveSnapshot( + previousSnapshot, snapChainManager, omSnapshotManager); } Table previousKeyTable = null; @@ -445,8 +414,7 @@ private void processSnapshotDeepClean(int delCount) if (!keysToPurge.isEmpty()) { processKeyDeletes(keysToPurge, currOmSnapshot.getKeyManager(), - keysToModify, currSnapInfo.getTableKey(), - Optional.ofNullable(previousSnapshot).map(SnapshotInfo::getSnapshotId).orElse(null)); + keysToModify, currSnapInfo.getTableKey()); } } finally { IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); @@ -515,7 +483,24 @@ private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); + if (isRatisEnabled()) { + OzoneManagerRatisServer server = getOzoneManager().getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(getRunCount().get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + server.submitRequest(omRequest, raftClientRequest); + } else { + getOzoneManager().getOmServerProtocol() + .submitRequest(null, omRequest); + } } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java index f1084155e98..1199a0c6506 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java @@ -29,13 +29,16 @@ import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -205,7 +208,24 @@ private OMRequest createRequest(List private void submitRequest(OMRequest omRequest) { try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + if (isRatisEnabled()) { + OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(runCount.get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + server.submitRequest(omRequest, raftClientRequest); + } else { + ozoneManager.getOmServerProtocol().submitRequest(null, + omRequest); + } } catch (ServiceException e) { LOG.error("Expired multipart info delete request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java index 768c77ad16e..45112037c1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; @@ -54,11 +55,12 @@ import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Policy; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Role; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetRangerServiceVersionRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -373,6 +375,19 @@ long getRangerOzoneServicePolicyVersion() throws IOException { return policyVersion; } + private RaftClientRequest newRaftClientRequest(OMRequest omRequest) { + return RaftClientRequest.newBuilder() + .setClientId(CLIENT_ID) + .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) + .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) + .setCallId(runCount.get()) + .setMessage( + Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + } + public void setOMDBRangerServiceVersion(long version) throws ServiceException { // OM DB update goes through Ratis @@ -387,7 +402,9 @@ public void setOMDBRangerServiceVersion(long version) .build(); try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.get()); + RaftClientRequest raftClientRequest = newRaftClientRequest(omRequest); + ozoneManager.getOmRatisServer().submitRequest(omRequest, + raftClientRequest); } catch (ServiceException e) { LOG.error("SetRangerServiceVersion request failed. 
" + "Will retry at next run."); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java index c0d958f6121..ab556230194 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java @@ -31,7 +31,8 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteOpenKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -40,6 +41,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -265,7 +268,24 @@ private OMRequest createDeleteOpenKeysRequest( private OMResponse submitRequest(OMRequest omRequest) { try { - return OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + if (isRatisEnabled()) { + OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(runCount.get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + return server.submitRequest(omRequest, raftClientRequest); + } else { + return ozoneManager.getOmServerProtocol().submitRequest( + null, omRequest); + } } catch (ServiceException e) { LOG.error("Open key " + omRequest.getCmdType() + " request failed. 
Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java index 1a29ee8d96b..b3e64c98c5d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java @@ -26,7 +26,6 @@ import java.io.UncheckedIOException; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -49,13 +48,15 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.codehaus.jackson.map.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,31 +74,27 @@ public class QuotaRepairTask { private static final int TASK_THREAD_CNT = 3; private static final AtomicBoolean IN_PROGRESS = new AtomicBoolean(false); private static final RepairStatus REPAIR_STATUS = new RepairStatus(); - private static final AtomicLong RUN_CNT = new AtomicLong(0); private final OzoneManager om; + private final AtomicLong runCount = new AtomicLong(0); private ExecutorService executor; public QuotaRepairTask(OzoneManager ozoneManager) { this.om = ozoneManager; } - public CompletableFuture repair() throws IOException { - return repair(Collections.emptyList()); - } - - public CompletableFuture repair(List buckets) throws IOException { + public CompletableFuture repair() throws Exception { // lock in progress operation and reject any other if (!IN_PROGRESS.compareAndSet(false, true)) { LOG.info("quota repair task already running"); - throw new OMException("Quota repair is already running", OMException.ResultCodes.QUOTA_ERROR); + return CompletableFuture.supplyAsync(() -> false); } - REPAIR_STATUS.reset(RUN_CNT.get() + 1); - return CompletableFuture.supplyAsync(() -> repairTask(buckets)); + REPAIR_STATUS.reset(runCount.get() + 1); + return CompletableFuture.supplyAsync(() -> repairTask()); } public static String getStatus() { return REPAIR_STATUS.toString(); } - private boolean repairTask(List buckets) { + private boolean repairTask() { LOG.info("Starting quota repair task {}", REPAIR_STATUS); OMMetadataManager activeMetaManager = null; try { @@ -107,7 +104,7 @@ private boolean repairTask(List buckets) { = OzoneManagerProtocolProtos.QuotaRepairRequest.newBuilder(); // repair active db activeMetaManager = createActiveDBCheckpoint(om.getMetadataManager(), om.getConfiguration()); - repairActiveDb(activeMetaManager, builder, buckets); + repairActiveDb(activeMetaManager, builder); // TODO: repair snapshots for quota @@ -119,12 +116,12 @@ private boolean repairTask(List buckets) { 
.setClientId(clientId.toString()) .build(); OzoneManagerProtocolProtos.OMResponse response = submitRequest(omRequest, clientId); - if (response != null && response.getSuccess()) { - REPAIR_STATUS.updateStatus(builder, om.getMetadataManager()); - } else { + if (response != null && !response.getSuccess()) { LOG.error("update quota repair count response failed"); REPAIR_STATUS.updateStatus("Response for update DB is failed"); return false; + } else { + REPAIR_STATUS.updateStatus(builder, om.getMetadataManager()); } } catch (Exception exp) { LOG.error("quota repair count failed", exp); @@ -148,15 +145,11 @@ private boolean repairTask(List buckets) { private void repairActiveDb( OMMetadataManager metadataManager, - OzoneManagerProtocolProtos.QuotaRepairRequest.Builder builder, - List buckets) throws Exception { + OzoneManagerProtocolProtos.QuotaRepairRequest.Builder builder) throws Exception { Map nameBucketInfoMap = new HashMap<>(); Map idBucketInfoMap = new HashMap<>(); Map oriBucketInfoMap = new HashMap<>(); - prepareAllBucketInfo(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, buckets); - if (nameBucketInfoMap.isEmpty()) { - throw new OMException("no matching buckets", OMException.ResultCodes.BUCKET_NOT_FOUND); - } + prepareAllBucketInfo(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager); repairCount(nameBucketInfoMap, idBucketInfoMap, metadataManager); @@ -181,21 +174,31 @@ private void repairActiveDb( } // update volume to support quota - if (buckets.isEmpty()) { - builder.setSupportVolumeOldQuota(true); - } else { - builder.setSupportVolumeOldQuota(false); - } + builder.setSupportVolumeOldQuota(true); } private OzoneManagerProtocolProtos.OMResponse submitRequest( - OzoneManagerProtocolProtos.OMRequest omRequest, ClientId clientId) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest, ClientId clientId) { try { - return OzoneManagerRatisUtils.submitRequest(om, omRequest, clientId, RUN_CNT.getAndIncrement()); + if (om.isRatisEnabled()) { + OzoneManagerRatisServer server = om.getOmRatisServer(); + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(om.getOmRatisServer().getRaftPeerId()) + .setGroupId(om.getOmRatisServer().getRaftGroupId()) + .setCallId(runCount.getAndIncrement()) + .setMessage(Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + return server.submitRequest(omRequest, raftClientRequest); + } else { + return om.getOmServerProtocol().submitRequest( + null, omRequest); + } } catch (ServiceException e) { LOG.error("repair quota count " + omRequest.getCmdType() + " request failed.", e); - throw e; } + return null; } private OMMetadataManager createActiveDBCheckpoint( @@ -225,42 +228,24 @@ private static String cleanTempCheckPointPath(OMMetadataManager omMetaManager) t private void prepareAllBucketInfo( Map nameBucketInfoMap, Map idBucketInfoMap, - Map oriBucketInfoMap, OMMetadataManager metadataManager, - List buckets) throws IOException { - if (!buckets.isEmpty()) { - for (String bucketkey : buckets) { - OmBucketInfo bucketInfo = metadataManager.getBucketTable().get(bucketkey); - if (null == bucketInfo) { - continue; - } - populateBucket(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, bucketInfo); - } - return; - } + Map oriBucketInfoMap, OMMetadataManager metadataManager) throws IOException { try (TableIterator> iterator = 
metadataManager.getBucketTable().iterator()) { while (iterator.hasNext()) { Table.KeyValue entry = iterator.next(); OmBucketInfo bucketInfo = entry.getValue(); - populateBucket(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, bucketInfo); + String bucketNameKey = buildNamePath(bucketInfo.getVolumeName(), + bucketInfo.getBucketName()); + oriBucketInfoMap.put(bucketNameKey, bucketInfo.copyObject()); + bucketInfo.incrUsedNamespace(-bucketInfo.getUsedNamespace()); + bucketInfo.incrUsedBytes(-bucketInfo.getUsedBytes()); + nameBucketInfoMap.put(bucketNameKey, bucketInfo); + idBucketInfoMap.put(buildIdPath(metadataManager.getVolumeId(bucketInfo.getVolumeName()), + bucketInfo.getObjectID()), bucketInfo); } } } - private static void populateBucket( - Map nameBucketInfoMap, Map idBucketInfoMap, - Map oriBucketInfoMap, OMMetadataManager metadataManager, - OmBucketInfo bucketInfo) throws IOException { - String bucketNameKey = buildNamePath(bucketInfo.getVolumeName(), - bucketInfo.getBucketName()); - oriBucketInfoMap.put(bucketNameKey, bucketInfo.copyObject()); - bucketInfo.incrUsedNamespace(-bucketInfo.getUsedNamespace()); - bucketInfo.incrUsedBytes(-bucketInfo.getUsedBytes()); - nameBucketInfoMap.put(bucketNameKey, bucketInfo); - idBucketInfoMap.put(buildIdPath(metadataManager.getVolumeId(bucketInfo.getVolumeName()), - bucketInfo.getObjectID()), bucketInfo); - } - private boolean isChange(OmBucketInfo lBucketInfo, OmBucketInfo rBucketInfo) { if (lBucketInfo.getUsedNamespace() != rBucketInfo.getUsedNamespace() || lBucketInfo.getUsedBytes() != rBucketInfo.getUsedBytes()) { @@ -483,9 +468,8 @@ public String toString() { } Map status = new HashMap<>(); status.put("taskId", taskId); - status.put("lastRunStartTime", lastRunStartTime > 0 ? new java.util.Date(lastRunStartTime).toString() : ""); - status.put("lastRunFinishedTime", lastRunFinishedTime > 0 ? 
new java.util.Date(lastRunFinishedTime).toString() - : ""); + status.put("lastRunStartTime", lastRunStartTime); + status.put("lastRunFinishedTime", lastRunFinishedTime); status.put("errorMsg", errorMsg); status.put("bucketCountDiffMap", bucketCountDiffMap); try { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index edc6c7a1629..99e3903447d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -20,49 +20,57 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; 
import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; import java.util.List; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; @@ -88,17 +96,16 @@ public class SnapshotDeletingService extends AbstractKeyDeletingService { private final AtomicBoolean suspended; private final OzoneConfiguration conf; private final AtomicLong successRunCount; - private final int keyLimitPerTask; - private final int snapshotDeletionPerTask; + private final long snapshotDeletionPerTask; + private final int keyLimitPerSnapshot; private final int ratisByteLimit; - private final long serviceTimeout; public SnapshotDeletingService(long interval, long serviceTimeout, - OzoneManager ozoneManager) + OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient) throws IOException { super(SnapshotDeletingService.class.getSimpleName(), interval, TimeUnit.MILLISECONDS, SNAPSHOT_DELETING_CORE_POOL_SIZE, - serviceTimeout, ozoneManager, null); + serviceTimeout, ozoneManager, scmClient); this.ozoneManager = ozoneManager; this.omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) @@ -107,7 +114,8 @@ public SnapshotDeletingService(long interval, long serviceTimeout, this.successRunCount = new AtomicLong(0); this.suspended = new AtomicBoolean(false); this.conf = ozoneManager.getConfiguration(); - this.snapshotDeletionPerTask = conf.getInt(SNAPSHOT_DELETING_LIMIT_PER_TASK, + this.snapshotDeletionPerTask = conf + .getLong(SNAPSHOT_DELETING_LIMIT_PER_TASK, SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT); int limit = (int) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, @@ -115,35 +123,9 @@ public SnapshotDeletingService(long interval, long serviceTimeout, StorageUnit.BYTES); // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); - this.keyLimitPerTask = conf.getInt( + this.keyLimitPerSnapshot = conf.getInt( OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK, OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); - this.serviceTimeout = serviceTimeout; - } - - // Wait for a notification from KeyDeletingService if the key deletion is running. This is to ensure, merging of - // entries do not start while the AOS is still processing the deleted keys. - @VisibleForTesting - public void waitForKeyDeletingService() throws InterruptedException { - KeyDeletingService keyDeletingService = getOzoneManager().getKeyManager().getDeletingService(); - synchronized (keyDeletingService) { - while (keyDeletingService.isRunningOnAOS()) { - keyDeletingService.wait(serviceTimeout); - } - } - } - - // Wait for a notification from DirectoryDeletingService if the directory deletion is running. This is to ensure, - // merging of entries do not start while the AOS is still processing the deleted keys. 
- @VisibleForTesting - public void waitForDirDeletingService() throws InterruptedException { - DirectoryDeletingService directoryDeletingService = getOzoneManager().getKeyManager() - .getDirDeletingService(); - synchronized (directoryDeletingService) { - while (directoryDeletingService.isRunningOnAOS()) { - directoryDeletingService.wait(serviceTimeout); - } - } } private class SnapshotDeletingTask implements BackgroundTask { @@ -157,89 +139,317 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - try { - int remaining = keyLimitPerTask; - Iterator iterator = chainManager.iterator(true); - List snapshotsToBePurged = new ArrayList<>(); + ReferenceCounted rcOmSnapshot = null; + ReferenceCounted rcOmPreviousSnapshot = null; + + Table snapshotInfoTable = + ozoneManager.getMetadataManager().getSnapshotInfoTable(); + List purgeSnapshotKeys = new ArrayList<>(); + try (TableIterator> iterator = snapshotInfoTable.iterator()) { + long snapshotLimit = snapshotDeletionPerTask; - while (iterator.hasNext() && snapshotLimit > 0 && remaining > 0) { - SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, chainManager, iterator.next()); + + while (iterator.hasNext() && snapshotLimit > 0) { + SnapshotInfo snapInfo = iterator.next().getValue(); + + // Only Iterate in deleted snapshot if (shouldIgnoreSnapshot(snapInfo)) { continue; } - LOG.info("Started Snapshot Deletion Processing for snapshot : {}", snapInfo.getTableKey()); - SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, chainManager, snapInfo); - // Continue if the next snapshot is not active. This is to avoid unnecessary copies from one snapshot to - // another. - if (nextSnapshot != null && - nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + + // Note: Can refactor this to use try-with-resources. + // Handling RC decrements manually for now to minimize conflicts. + rcOmSnapshot = omSnapshotManager.getSnapshot( + snapInfo.getVolumeName(), + snapInfo.getBucketName(), + snapInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); + + Table snapshotDeletedTable = + omSnapshot.getMetadataManager().getDeletedTable(); + Table snapshotDeletedDirTable = + omSnapshot.getMetadataManager().getDeletedDirTable(); + + Table renamedTable = + omSnapshot.getMetadataManager().getSnapshotRenamedTable(); + + long volumeId = ozoneManager.getMetadataManager() + .getVolumeId(snapInfo.getVolumeName()); + // Get bucketInfo for the snapshot bucket to get bucket layout. + String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey( + snapInfo.getVolumeName(), snapInfo.getBucketName()); + OmBucketInfo bucketInfo = ozoneManager.getMetadataManager() + .getBucketTable().get(dbBucketKey); + + if (bucketInfo == null) { + // Decrement ref count + rcOmSnapshot.close(); + rcOmSnapshot = null; + throw new IllegalStateException("Bucket " + "/" + + snapInfo.getVolumeName() + "/" + snapInfo.getBucketName() + + " is not found. BucketInfo should not be null for snapshotted" + + " bucket. 
The OM is in unexpected state."); + } + + String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; + String dbBucketKeyForDir = ozoneManager.getMetadataManager() + .getBucketKey(Long.toString(volumeId), + Long.toString(bucketInfo.getObjectID())) + OM_KEY_PREFIX; + + if (isSnapshotReclaimable(snapshotDeletedTable, + snapshotDeletedDirTable, snapshotBucketKey, dbBucketKeyForDir)) { + purgeSnapshotKeys.add(snapInfo.getTableKey()); + // Decrement ref count + rcOmSnapshot.close(); + rcOmSnapshot = null; continue; } - // nextSnapshot = null means entries would be moved to AOS. - if (nextSnapshot == null) { - waitForKeyDeletingService(); - waitForDirDeletingService(); + //TODO: [SNAPSHOT] Add lock to deletedTable and Active DB. + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( + snapInfo, chainManager, omSnapshotManager); + Table previousKeyTable = null; + Table previousDirTable = null; + OmSnapshot omPreviousSnapshot = null; + + // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable + // and next snapshot deletedKeyTable. + if (previousSnapshot != null) { + rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( + previousSnapshot.getVolumeName(), + previousSnapshot.getBucketName(), + previousSnapshot.getName()); + omPreviousSnapshot = rcOmPreviousSnapshot.get(); + + previousKeyTable = omPreviousSnapshot + .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); + previousDirTable = omPreviousSnapshot + .getMetadataManager().getDirectoryTable(); } - try (ReferenceCounted snapshot = omSnapshotManager.getSnapshot( - snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName())) { - KeyManager snapshotKeyManager = snapshot.get().getKeyManager(); - int moveCount = 0; - // Get all entries from deletedKeyTable. - List>> deletedKeyEntries = - snapshotKeyManager.getDeletedKeyEntries(snapInfo.getVolumeName(), snapInfo.getBucketName(), - null, remaining); - moveCount += deletedKeyEntries.size(); - // Get all entries from deletedDirTable. - List> deletedDirEntries = snapshotKeyManager.getDeletedDirEntries( - snapInfo.getVolumeName(), snapInfo.getBucketName(), remaining - moveCount); - moveCount += deletedDirEntries.size(); - // Get all entries from snapshotRenamedTable. - List> renameEntries = snapshotKeyManager.getRenamesKeyEntries( - snapInfo.getVolumeName(), snapInfo.getBucketName(), null, remaining - moveCount); - moveCount += renameEntries.size(); - if (moveCount > 0) { - List deletedKeys = new ArrayList<>(deletedKeyEntries.size()); - List deletedDirs = new ArrayList<>(deletedDirEntries.size()); - List renameKeys = new ArrayList<>(renameEntries.size()); - - // Convert deletedKeyEntries to SnapshotMoveKeyInfos. - for (Table.KeyValue> deletedEntry : deletedKeyEntries) { - deletedKeys.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedEntry.getKey()) - .addAllKeyInfos(deletedEntry.getValue() - .stream().map(val -> val.getProtobuf(ClientVersion.CURRENT_VERSION)) - .collect(Collectors.toList())).build()); + + // Move key to either next non deleted snapshot's deletedTable + // or keep it in current snapshot deleted table. 
+ List toReclaimList = new ArrayList<>(); + List toNextDBList = new ArrayList<>(); + // A list of renamed keys/files/dirs + List renamedList = new ArrayList<>(); + List dirsToMove = new ArrayList<>(); + + long remainNum = handleDirectoryCleanUp(snapshotDeletedDirTable, + previousDirTable, renamedTable, dbBucketKeyForDir, snapInfo, + omSnapshot, dirsToMove, renamedList); + int deletionCount = 0; + + try (TableIterator> deletedIterator = snapshotDeletedTable + .iterator()) { + + List keysToPurge = new ArrayList<>(); + deletedIterator.seek(snapshotBucketKey); + + while (deletedIterator.hasNext() && + deletionCount < remainNum) { + Table.KeyValue + deletedKeyValue = deletedIterator.next(); + String deletedKey = deletedKeyValue.getKey(); + + // Exit if it is out of the bucket scope. + if (!deletedKey.startsWith(snapshotBucketKey)) { + // If snapshot deletedKeyTable doesn't have any + // entry in the snapshot scope it can be reclaimed + break; + } + + RepeatedOmKeyInfo repeatedOmKeyInfo = deletedKeyValue.getValue(); + + SnapshotMoveKeyInfos.Builder toReclaim = SnapshotMoveKeyInfos + .newBuilder() + .setKey(deletedKey); + SnapshotMoveKeyInfos.Builder toNextDb = SnapshotMoveKeyInfos + .newBuilder() + .setKey(deletedKey); + HddsProtos.KeyValue.Builder renamedKey = HddsProtos.KeyValue + .newBuilder(); + + for (OmKeyInfo keyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { + splitRepeatedOmKeyInfo(toReclaim, toNextDb, renamedKey, + keyInfo, previousKeyTable, renamedTable, + bucketInfo, volumeId); } - // Convert deletedDirEntries to SnapshotMoveKeyInfos. - for (Table.KeyValue deletedDirEntry : deletedDirEntries) { - deletedDirs.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedDirEntry.getKey()) - .addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); + // If all the KeyInfos are reclaimable in RepeatedOmKeyInfo + // then no need to update current snapshot deletedKeyTable. + if (!(toReclaim.getKeyInfosCount() == + repeatedOmKeyInfo.getOmKeyInfoList().size())) { + toReclaimList.add(toReclaim.build()); + toNextDBList.add(toNextDb.build()); + } else { + // The key can be reclaimed here. + List blocksForKeyDelete = omSnapshot + .getMetadataManager() + .getBlocksForKeyDelete(deletedKey); + if (blocksForKeyDelete != null) { + keysToPurge.addAll(blocksForKeyDelete); + } } - // Convert renamedEntries to KeyValue. - for (Table.KeyValue renameEntry : renameEntries) { - renameKeys.add(HddsProtos.KeyValue.newBuilder().setKey(renameEntry.getKey()) - .setValue(renameEntry.getValue()).build()); + if (renamedKey.hasKey() && renamedKey.hasValue()) { + renamedList.add(renamedKey.build()); } - submitSnapshotMoveDeletedKeys(snapInfo, deletedKeys, renameKeys, deletedDirs); - remaining -= moveCount; - } else { - snapshotsToBePurged.add(snapInfo.getTableKey()); + deletionCount++; } + + // Delete keys From deletedTable + processKeyDeletes(keysToPurge, omSnapshot.getKeyManager(), + null, snapInfo.getTableKey()); + successRunCount.incrementAndGet(); + } catch (IOException ex) { + LOG.error("Error while running Snapshot Deleting Service for " + + "snapshot " + snapInfo.getTableKey() + " with snapshotId " + + snapInfo.getSnapshotId() + ". Processed " + deletionCount + + " keys and " + (keyLimitPerSnapshot - remainNum) + + " directories and files", ex); } - successRunCount.incrementAndGet(); snapshotLimit--; - } - if (!snapshotsToBePurged.isEmpty()) { - submitSnapshotPurgeRequest(snapshotsToBePurged); + // Submit Move request to OM. 
+ submitSnapshotMoveDeletedKeys(snapInfo, toReclaimList, + toNextDBList, renamedList, dirsToMove); + + // Properly decrement ref count for rcOmPreviousSnapshot + if (rcOmPreviousSnapshot != null) { + rcOmPreviousSnapshot.close(); + rcOmPreviousSnapshot = null; + } } } catch (IOException e) { LOG.error("Error while running Snapshot Deleting Service", e); + } finally { + // Decrement ref counts + if (rcOmPreviousSnapshot != null) { + rcOmPreviousSnapshot.close(); + } + if (rcOmSnapshot != null) { + rcOmSnapshot.close(); + } } + submitSnapshotPurgeRequest(purgeSnapshotKeys); + return BackgroundTaskResult.EmptyTaskResult.newResult(); } + private boolean isSnapshotReclaimable( + Table snapshotDeletedTable, + Table snapshotDeletedDirTable, + String snapshotBucketKey, String dbBucketKeyForDir) throws IOException { + + boolean isDirTableCleanedUp = false; + boolean isKeyTableCleanedUp = false; + try (TableIterator> iterator = snapshotDeletedTable.iterator();) { + iterator.seek(snapshotBucketKey); + // If the next entry doesn't start with snapshotBucketKey then + // deletedKeyTable is already cleaned up. + isKeyTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() + .startsWith(snapshotBucketKey); + } + + try (TableIterator> + iterator = snapshotDeletedDirTable.iterator()) { + iterator.seek(dbBucketKeyForDir); + // If the next entry doesn't start with dbBucketKeyForDir then + // deletedDirTable is already cleaned up. + isDirTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() + .startsWith(dbBucketKeyForDir); + } + + return (isDirTableCleanedUp || snapshotDeletedDirTable.isEmpty()) && + (isKeyTableCleanedUp || snapshotDeletedTable.isEmpty()); + } + + @SuppressWarnings("checkstyle:ParameterNumber") + private long handleDirectoryCleanUp( + Table snapshotDeletedDirTable, + Table previousDirTable, + Table renamedTable, + String dbBucketKeyForDir, SnapshotInfo snapInfo, + OmSnapshot omSnapshot, List dirsToMove, + List renamedList) { + + long dirNum = 0L; + long subDirNum = 0L; + long subFileNum = 0L; + long remainNum = keyLimitPerSnapshot; + int consumedSize = 0; + List purgePathRequestList = new ArrayList<>(); + List> allSubDirList + = new ArrayList<>(keyLimitPerSnapshot); + try (TableIterator> deletedDirIterator = + snapshotDeletedDirTable.iterator()) { + + long startTime = Time.monotonicNow(); + deletedDirIterator.seek(dbBucketKeyForDir); + + while (deletedDirIterator.hasNext()) { + Table.KeyValue deletedDir = + deletedDirIterator.next(); + String deletedDirKey = deletedDir.getKey(); + + // Exit for dirs out of snapshot scope. 
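// Editor's note (illustrative sketch only): the directory clean-up that follows
// caps each batch of purge requests by serialized size before handing it to
// Ratis (see the isBufferLimitCrossed checks against ratisByteLimit). The
// sketch below shows that budget pattern with a hypothetical Request type and
// sizes; it is not the actual PurgePathRequest handling.
import java.util.ArrayList;
import java.util.List;

public final class BatchBudgetSketch {

  /** Hypothetical stand-in for a protobuf request with a known wire size. */
  static final class Request {
    final int serializedSize;

    Request(int serializedSize) {
      this.serializedSize = serializedSize;
    }
  }

  /**
   * Accumulate requests until adding the next one would exceed the byte
   * budget; whatever is left over waits for the next service run.
   */
  static List<Request> fillBatch(List<Request> pending, int byteBudget) {
    List<Request> batch = new ArrayList<>();
    int consumed = 0;
    for (Request request : pending) {
      if (consumed + request.serializedSize > byteBudget) {
        break; // budget exhausted; stop and submit what we have
      }
      consumed += request.serializedSize;
      batch.add(request);
    }
    return batch;
  }

  private BatchBudgetSketch() {
  }
}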
+ if (!deletedDirKey.startsWith(dbBucketKeyForDir)) { + break; + } + + if (isDirReclaimable(deletedDir, previousDirTable, + renamedTable, renamedList)) { + // Reclaim here + PurgePathRequest request = prepareDeleteDirRequest( + remainNum, deletedDir.getValue(), deletedDir.getKey(), + allSubDirList, omSnapshot.getKeyManager()); + if (isBufferLimitCrossed(ratisByteLimit, consumedSize, + request.getSerializedSize())) { + if (purgePathRequestList.size() != 0) { + // if message buffer reaches max limit, avoid sending further + remainNum = 0; + break; + } + // if directory itself is having a lot of keys / files, + // reduce capacity to minimum level + remainNum = MIN_ERR_LIMIT_PER_TASK; + request = prepareDeleteDirRequest( + remainNum, deletedDir.getValue(), deletedDir.getKey(), + allSubDirList, omSnapshot.getKeyManager()); + } + consumedSize += request.getSerializedSize(); + purgePathRequestList.add(request); + remainNum = remainNum - request.getDeletedSubFilesCount(); + remainNum = remainNum - request.getMarkDeletedSubDirsCount(); + // Count up the purgeDeletedDir, subDirs and subFiles + if (request.getDeletedDir() != null + && !request.getDeletedDir().isEmpty()) { + dirNum++; + } + subDirNum += request.getMarkDeletedSubDirsCount(); + subFileNum += request.getDeletedSubFilesCount(); + } else { + dirsToMove.add(deletedDir.getKey()); + } + } + + remainNum = optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, + subDirNum, subFileNum, allSubDirList, purgePathRequestList, + snapInfo.getTableKey(), startTime, ratisByteLimit - consumedSize, + omSnapshot.getKeyManager()); + } catch (IOException e) { + LOG.error("Error while running delete directories and files for " + + "snapshot " + snapInfo.getTableKey() + " in snapshot deleting " + + "background task. Will retry at next run.", e); + } + + return remainNum; + } + private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { if (!purgeSnapshotKeys.isEmpty()) { SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest @@ -257,36 +467,92 @@ private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { } } - private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, - List deletedKeys, - List renamedList, - List dirsToMove) { + @SuppressWarnings("checkstyle:ParameterNumber") + private void splitRepeatedOmKeyInfo(SnapshotMoveKeyInfos.Builder toReclaim, + SnapshotMoveKeyInfos.Builder toNextDb, + HddsProtos.KeyValue.Builder renamedKey, OmKeyInfo keyInfo, + Table previousKeyTable, + Table renamedTable, + OmBucketInfo bucketInfo, long volumeId) throws IOException { + + if (isKeyReclaimable(previousKeyTable, renamedTable, + keyInfo, bucketInfo, volumeId, renamedKey)) { + // Update in current db's deletedKeyTable + toReclaim.addKeyInfos(keyInfo + .getProtobuf(ClientVersion.CURRENT_VERSION)); + } else { + // Move to next non deleted snapshot's deleted table + toNextDb.addKeyInfos(keyInfo.getProtobuf( + ClientVersion.CURRENT_VERSION)); + } + } - SnapshotMoveTableKeysRequest.Builder moveDeletedKeysBuilder = SnapshotMoveTableKeysRequest.newBuilder() - .setFromSnapshotID(toProtobuf(snapInfo.getSnapshotId())); + private boolean isDirReclaimable( + Table.KeyValue deletedDir, + Table previousDirTable, + Table renamedTable, + List renamedList) throws IOException { - SnapshotMoveTableKeysRequest moveDeletedKeys = moveDeletedKeysBuilder - .addAllDeletedKeys(deletedKeys) - .addAllRenamedKeys(renamedList) - .addAllDeletedDirs(dirsToMove) - .build(); - if (isBufferLimitCrossed(ratisByteLimit, 0, moveDeletedKeys.getSerializedSize())) { - int remaining 
= MIN_ERR_LIMIT_PER_TASK; - deletedKeys = deletedKeys.subList(0, Math.min(remaining, deletedKeys.size())); - remaining -= deletedKeys.size(); - renamedList = renamedList.subList(0, Math.min(remaining, renamedList.size())); - remaining -= renamedList.size(); - dirsToMove = dirsToMove.subList(0, Math.min(remaining, dirsToMove.size())); - moveDeletedKeys = moveDeletedKeysBuilder - .addAllDeletedKeys(deletedKeys) - .addAllRenamedKeys(renamedList) - .addAllDeletedDirs(dirsToMove) + if (previousDirTable == null) { + return true; + } + + String deletedDirDbKey = deletedDir.getKey(); + OmKeyInfo deletedDirInfo = deletedDir.getValue(); + String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( + deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), + deletedDirInfo.getObjectID()); + + /* + snapshotRenamedTable: /volumeName/bucketName/objectID -> + /volumeId/bucketId/parentId/dirName + */ + String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); + String prevDbKey = null; + + if (dbKeyBeforeRename != null) { + prevDbKey = dbKeyBeforeRename; + HddsProtos.KeyValue renamedDir = HddsProtos.KeyValue + .newBuilder() + .setKey(dbRenameKey) + .setValue(dbKeyBeforeRename) .build(); + renamedList.add(renamedDir); + } else { + // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to + // OzoneDeletePathKey. Changing it back to check the previous DirTable. + prevDbKey = ozoneManager.getMetadataManager() + .getOzoneDeletePathDirKey(deletedDirDbKey); } + OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); + if (prevDirectoryInfo == null) { + return true; + } + + return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); + } + + public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, + List toReclaimList, + List toNextDBList, + List renamedList, + List dirsToMove) throws InterruptedException { + + SnapshotMoveDeletedKeysRequest.Builder moveDeletedKeysBuilder = + SnapshotMoveDeletedKeysRequest.newBuilder() + .setFromSnapshot(snapInfo.getProtobuf()); + + SnapshotMoveDeletedKeysRequest moveDeletedKeys = moveDeletedKeysBuilder + .addAllReclaimKeys(toReclaimList) + .addAllNextDBKeys(toNextDBList) + .addAllRenamedKeys(renamedList) + .addAllDeletedDirsToMove(dirsToMove) + .build(); + OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SnapshotMoveTableKeys) - .setSnapshotMoveTableKeysRequest(moveDeletedKeys) + .setCmdType(Type.SnapshotMoveDeletedKeys) + .setSnapshotMoveDeletedKeysRequest(moveDeletedKeys) .setClientId(clientId.toString()) .build(); @@ -295,26 +561,36 @@ private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, } } - private void submitRequest(OMRequest omRequest) { + public void submitRequest(OMRequest omRequest) { try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, getRunCount().get()); + if (isRatisEnabled()) { + OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(getRunCount().get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + server.submitRequest(omRequest, raftClientRequest); + } else { + ozoneManager.getOmServerProtocol().submitRequest(null, omRequest); + } } catch (ServiceException e) { - LOG.error("Request: {} fired by SnapshotDeletingService failed. 
Will retry in the next run", omRequest, e); + LOG.error("Snapshot Deleting request failed. " + + "Will retry at next run.", e); } } } - /** - * Checks if a given snapshot has been deleted and all the changes made to snapshot have been flushed to disk. - * @param snapInfo SnapshotInfo corresponding to the snapshot. - * @return true if the snapshot is still active or changes to snapshot have not been flushed to disk otherwise false. - * @throws IOException - */ @VisibleForTesting - boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { + boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) { SnapshotInfo.SnapshotStatus snapshotStatus = snapInfo.getSnapshotStatus(); - return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); + return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; } // TODO: Move this util class. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index e7133e62589..fe0f6e111ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -34,12 +34,13 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -47,6 +48,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import java.io.IOException; import java.util.ArrayList; @@ -61,7 +64,6 @@ import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getPreviousSnapshot; /** * Snapshot BG Service for deleted directory deep clean and exclusive size @@ -144,11 +146,11 @@ public BackgroundTaskResult call() { > iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { - SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); + SnapshotInfo currSnapInfo = iterator.next().getValue(); // Expand deleted dirs only on active snapshot. 
Deleted Snapshots // will be cleaned up by SnapshotDeletingService. - if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepCleanedDeletedDir()) { continue; } @@ -174,7 +176,8 @@ public BackgroundTaskResult call() { "unexpected state."); } - SnapshotInfo previousSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, currSnapInfo); + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( + currSnapInfo, snapChainManager, omSnapshotManager); SnapshotInfo previousToPrevSnapshot = null; Table previousKeyTable = null; @@ -191,7 +194,8 @@ public BackgroundTaskResult call() { .getKeyTable(bucketInfo.getBucketLayout()); prevRenamedTable = omPreviousSnapshot .getMetadataManager().getSnapshotRenamedTable(); - previousToPrevSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, previousSnapshot); + previousToPrevSnapshot = getPreviousActiveSnapshot( + previousSnapshot, snapChainManager, omSnapshotManager); } Table previousToPrevKeyTable = null; @@ -434,7 +438,25 @@ private void updateDeepCleanSnapshotDir(String snapshotKeyTable) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); + if (isRatisEnabled()) { + OzoneManagerRatisServer server = + getOzoneManager().getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(getRunCount().get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + server.submitRequest(omRequest, raftClientRequest); + } else { + getOzoneManager().getOmServerProtocol() + .submitRequest(null, omRequest); + } } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. " + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java index b400fb6ed76..05b0e5b0cdc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java @@ -77,7 +77,7 @@ public static Object getINode(Path file) throws IOException { * sst compaction backup directory) * * @param truncateLength - Length of initial path to trim in file path. - * @param hardLinkFiles - Map of link->file paths. + * @param hardLinkFiles - Map of link->file paths. * @return Path to the file of links created. 
*/ public static Path createHardLinkList(int truncateLength, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 201a9fe0c9c..2041fa791a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -21,14 +21,12 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -36,13 +34,9 @@ import java.io.File; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import java.util.NoSuchElementException; import java.util.HashMap; import java.util.Map; -import java.util.Objects; -import java.util.Optional; import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -93,13 +87,6 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } - public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, - SnapshotChainManager chainManager, - UUID snapshotId) throws IOException { - String tableKey = chainManager.getTableKey(snapshotId); - return SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); - } - public static void dropColumnFamilyHandle( final ManagedRocksDB rocksDB, final ColumnFamilyHandle columnFamilyHandle) { @@ -153,24 +140,37 @@ public static void checkSnapshotActive(SnapshotInfo snapInfo, } /** - * Get the next snapshot in the snapshot chain. + * Get the next non deleted snapshot in the snapshot chain. */ - public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, - SnapshotChainManager chainManager, - SnapshotInfo snapInfo) + public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, + SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) throws IOException { + // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { - throw new OMException("Provided Snapshot Info argument is null. Cannot get the next snapshot for a null value", - INVALID_SNAPSHOT_ERROR); + throw new OMException("Snapshot Info is null. 
Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); } + try { - if (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), + while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { - UUID nextPathSnapshot = chainManager.nextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - return getSnapshotInfo(ozoneManager, chainManager, nextPathSnapshot); + + UUID nextPathSnapshot = + chainManager.nextPathSnapshot( + snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); + + String tableKey = chainManager.getTableKey(nextPathSnapshot); + SnapshotInfo nextSnapshotInfo = + omSnapshotManager.getSnapshotInfo(tableKey); + + if (nextSnapshotInfo.getSnapshotStatus().equals( + SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)) { + return nextSnapshotInfo; + } + + snapInfo = nextSnapshotInfo; } } catch (NoSuchElementException ex) { LOG.error("The snapshot {} is not longer in snapshot chain, It " + @@ -180,41 +180,6 @@ public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, return null; } - /** - * Get the previous snapshot in the snapshot chain. - */ - public static SnapshotInfo getPreviousSnapshot(OzoneManager ozoneManager, - SnapshotChainManager chainManager, - SnapshotInfo snapInfo) - throws IOException { - UUID previousSnapshotId = getPreviousSnapshotId(snapInfo, chainManager); - return previousSnapshotId == null ? null : getSnapshotInfo(ozoneManager, chainManager, previousSnapshotId); - } - - /** - * Get the previous snapshot in the snapshot chain. - */ - private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) - throws IOException { - // If the snapshot is deleted in the previous run, then the in-memory - // SnapshotChainManager might throw NoSuchElementException as the snapshot - // is removed in-memory but OMDoubleBuffer has not flushed yet. - if (snapInfo == null) { - throw new OMException("Provided Snapshot Info argument is null. Cannot get the previous snapshot for a null " + - "value", INVALID_SNAPSHOT_ERROR); - } - try { - if (chainManager.hasPreviousPathSnapshot(snapInfo.getSnapshotPath(), - snapInfo.getSnapshotId())) { - return chainManager.previousPathSnapshot(snapInfo.getSnapshotPath(), - snapInfo.getSnapshotId()); - } - } catch (NoSuchElementException ignored) { - - } - return null; - } - /** * Return a map column family to prefix for the keys in the table for * the given volume and bucket. @@ -244,7 +209,7 @@ public static Map getColumnFamilyToKeyPrefixMap( *

    * Note: Currently, this is only intended to be a special use case in * Snapshot. If this is used elsewhere, consider moving this to - * {@link OMMetadataManager}. + * @link OMMetadataManager}. * * @param volumeName volume name * @param bucketName bucket name @@ -279,74 +244,4 @@ public static String getOzonePathKeyForFso(OMMetadataManager metadataManager, final long bucketId = metadataManager.getBucketId(volumeName, bucketName); return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; } - - /** - * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table. - * @param snapshotMoveKeyInfos keyInfos to be added. - * @param metadataManager metadataManager for a store. - * @return RepeatedOmKeyInfo - * @throws IOException - */ - public static RepeatedOmKeyInfo createMergedRepeatedOmKeyInfoFromDeletedTableEntry( - OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos, OMMetadataManager metadataManager) throws - IOException { - String dbKey = snapshotMoveKeyInfos.getKey(); - List keyInfoList = new ArrayList<>(); - for (OzoneManagerProtocolProtos.KeyInfo info : snapshotMoveKeyInfos.getKeyInfosList()) { - OmKeyInfo fromProtobuf = OmKeyInfo.getFromProtobuf(info); - keyInfoList.add(fromProtobuf); - } - // When older version of keys are moved to the next snapshot's deletedTable - // The newer version might also be in the next snapshot's deletedTable and - // it might overwrite the existing value which inturn could lead to orphan block in the system. - // Checking the keyInfoList with the last n versions of the omKeyInfo versions would ensure all versions are - // present in the list and would also avoid redundant additions to the list if the last n versions match, which - // can happen on om transaction replay on snapshotted rocksdb. - RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); - if (result == null) { - result = new RepeatedOmKeyInfo(keyInfoList); - } else if (!isSameAsLatestOmKeyInfo(keyInfoList, result)) { - keyInfoList.forEach(result::addOmKeyInfo); - } - return result; - } - - private static boolean isSameAsLatestOmKeyInfo(List omKeyInfos, - RepeatedOmKeyInfo result) { - int size = result.getOmKeyInfoList().size(); - if (size >= omKeyInfos.size()) { - return omKeyInfos.equals(result.getOmKeyInfoList().subList(size - omKeyInfos.size(), size)); - } - return false; - } - - public static SnapshotInfo getLatestSnapshotInfo(String volumeName, String bucketName, - OzoneManager ozoneManager, - SnapshotChainManager snapshotChainManager) throws IOException { - Optional latestPathSnapshot = Optional.ofNullable( - getLatestPathSnapshotId(volumeName, bucketName, snapshotChainManager)); - return latestPathSnapshot.isPresent() ? - getSnapshotInfo(ozoneManager, snapshotChainManager, latestPathSnapshot.get()) : null; - } - - public static UUID getLatestPathSnapshotId(String volumeName, String bucketName, - SnapshotChainManager snapshotChainManager) throws IOException { - String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; - return snapshotChainManager.getLatestPathSnapshotId(snapshotPath); - } - - // Validates the previous path snapshotId for given a snapshotInfo. In case snapshotInfo is - // null, the snapshotInfo would be considered as AOS and previous snapshot becomes the latest snapshot in the global - // snapshot chain. Would throw OMException if validation fails otherwise function would pass. 
- public static void validatePreviousSnapshotId(SnapshotInfo snapshotInfo, - SnapshotChainManager snapshotChainManager, - UUID expectedPreviousSnapshotId) throws IOException { - UUID previousSnapshotId = snapshotInfo == null ? snapshotChainManager.getLatestGlobalSnapshotId() : - SnapshotUtils.getPreviousSnapshotId(snapshotInfo, snapshotChainManager); - if (!Objects.equals(expectedPreviousSnapshotId, previousSnapshotId)) { - throw new OMException("Snapshot validation failed. Expected previous snapshotId : " + - expectedPreviousSnapshotId + " but was " + previousSnapshotId, - OMException.ResultCodes.INVALID_REQUEST); - } - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java index 5a62a7cfc62..7cdff8f5c11 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java @@ -40,7 +40,6 @@ public enum OMLayoutFeature implements LayoutFeature { MULTITENANCY_SCHEMA(3, "Multi-Tenancy Schema"), - @Deprecated HSYNC(4, "Support hsync"), FILESYSTEM_SNAPSHOT(5, "Ozone version supporting snapshot"), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 5682b040e85..a5e94689aee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -63,6 +63,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; @@ -112,6 +113,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest; @@ -130,6 +133,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffResponse; @@ -234,6 +238,11 @@ public OMResponse handleReadRequest(OMRequest request) { request.getListKeysRequest()); responseBuilder.setListKeysLightResponse(listKeysLightResponse); break; + case ListTrash: + ListTrashResponse listTrashResponse = listTrash( + request.getListTrashRequest(), request.getVersion()); + responseBuilder.setListTrashResponse(listTrashResponse); + break; case ListMultiPartUploadParts: MultipartUploadListPartsResponse listPartsResponse = listParts(request.getListMultipartUploadPartsRequest()); @@ -375,15 +384,11 @@ public OMResponse handleReadRequest(OMRequest request) { getSnapshotInfo(request.getSnapshotInfoRequest()); responseBuilder.setSnapshotInfoResponse(snapshotInfoResponse); break; - case GetQuotaRepairStatus: - OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusRsp = - getQuotaRepairStatus(request.getGetQuotaRepairStatusRequest()); - responseBuilder.setGetQuotaRepairStatusResponse(quotaRepairStatusRsp); - break; - case StartQuotaRepair: - OzoneManagerProtocolProtos.StartQuotaRepairResponse startQuotaRepairRsp = - startQuotaRepair(request.getStartQuotaRepairRequest()); - responseBuilder.setStartQuotaRepairResponse(startQuotaRepairRsp); + case GetServerDefaults: + responseBuilder.setServerDefaultsResponse( + ServerDefaultsResponse.newBuilder() + .setServerDefaults(impl.getServerDefaults().getProtobuf()) + .build()); break; default: responseBuilder.setSuccess(false); @@ -830,6 +835,26 @@ public static OMResponse disallowListKeysWithBucketLayout( return resp; } + private ListTrashResponse listTrash(ListTrashRequest request, + int clientVersion) throws IOException { + + ListTrashResponse.Builder resp = + ListTrashResponse.newBuilder(); + + List deletedKeys = impl.listTrash( + request.getVolumeName(), + request.getBucketName(), + request.getStartKeyName(), + request.getKeyPrefix(), + request.getMaxKeys()); + + for (RepeatedOmKeyInfo key: deletedKeys) { + resp.addDeletedKeys(key.getProto(false, clientVersion)); + } + + return resp.build(); + } + @RequestFeatureValidator( conditions = ValidationCondition.OLDER_CLIENT_REQUESTS, processingPhase = RequestProcessingPhase.POST_PROCESS, @@ -1524,16 +1549,4 @@ private SafeModeAction toSafeModeAction( safeMode); } } - - private OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse getQuotaRepairStatus( - OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest req) throws IOException { - return OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse.newBuilder() - .setStatus(impl.getQuotaRepairStatus()) - .build(); - } - private OzoneManagerProtocolProtos.StartQuotaRepairResponse startQuotaRepair( - OzoneManagerProtocolProtos.StartQuotaRepairRequest req) throws IOException { - impl.startQuotaRepair(req.getBucketsList()); - return OzoneManagerProtocolProtos.StartQuotaRepairResponse.newBuilder().build(); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index 76546f2e480..e60362a1ebb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -68,7 +68,7 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, 
TermIndex termI } /** - * Implementation of {@link #handleWriteRequest}. + * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. * * @param omRequest the write request * @param termIndex - ratis transaction term and index diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java new file mode 100644 index 00000000000..4f0c15f15e5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om; + + +import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ratis.util.ExitUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; + +/** + * Test Key Trash Service. + *

    + * This test covers: + * 1. UTs for list trash. + * 2. UTs for recover trash. + * 3. UTs for empty trash. + *

    + */ +public class TestTrashService { + + @TempDir + private Path tempFolder; + + private KeyManager keyManager; + private OzoneManagerProtocol writeClient; + private OzoneManager om; + private String volumeName; + private String bucketName; + + @BeforeEach + void setup() throws Exception { + ExitUtils.disableSystemExit(); + OzoneConfiguration configuration = new OzoneConfiguration(); + + File folder = tempFolder.toFile(); + if (!folder.exists()) { + assertTrue(folder.mkdirs()); + } + System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); + ServerUtils.setOzoneMetaDirPath(configuration, folder.toString()); + + OmTestManagers omTestManagers + = new OmTestManagers(configuration); + keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + volumeName = "volume"; + bucketName = "bucket"; + } + + @AfterEach + public void cleanup() throws Exception { + om.stop(); + } + + @Test + public void testRecoverTrash() throws IOException { + String keyName = "testKey"; + String destinationBucket = "destBucket"; + createAndDeleteKey(keyName); + + boolean recoverOperation = keyManager.getMetadataManager() + .recoverTrash(volumeName, bucketName, keyName, destinationBucket); + assertTrue(recoverOperation); + } + + private void createAndDeleteKey(String keyName) throws IOException { + + OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), + OmVolumeArgs.newBuilder() + .setOwnerName("owner") + .setAdminName("admin") + .setVolume(volumeName) + .build()); + + OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), + OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .build()); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setAcls(Collections.emptyList()) + .setLocationInfoList(new ArrayList<>()) + .setReplicationConfig(StandaloneReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.ONE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) + .build(); + + /* Create and delete key in the Key Manager. 
*/ + OpenKeySession session = writeClient.openKey(keyArgs); + writeClient.commitKey(keyArgs, session.getId()); + writeClient.deleteKey(keyArgs); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 6e24c9ff93f..125c9efcaf2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -44,9 +44,9 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.s3.S3SecretCacheProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; @@ -81,12 +81,12 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer doubleBuffer; private OzoneManager ozoneManager; private S3SecretLockedManager secretManager; + private final CreateSnapshotResponse snapshotResponse1 = mock(CreateSnapshotResponse.class); + private final CreateSnapshotResponse snapshotResponse2 = mock(CreateSnapshotResponse.class); private final OMResponse omKeyResponse = mock(OMResponse.class); private final OMResponse omBucketResponse = mock(OMResponse.class); private final OMResponse omSnapshotResponse1 = mock(OMResponse.class); private final OMResponse omSnapshotResponse2 = mock(OMResponse.class); - private final OMResponse omSnapshotPurgeResponseProto1 = mock(OMResponse.class); - private final OMResponse omSnapshotPurgeResponseProto2 = mock(OMResponse.class); private static OMClientResponse omKeyCreateResponse = mock(OMKeyCreateResponse.class); private static OMClientResponse omBucketCreateResponse = @@ -95,9 +95,6 @@ class TestOzoneManagerDoubleBuffer { mock(OMSnapshotCreateResponse.class); private static OMClientResponse omSnapshotCreateResponse2 = mock(OMSnapshotCreateResponse.class); - private static OMClientResponse omSnapshotPurgeResponse1 = mock(OMSnapshotPurgeResponse.class); - private static OMClientResponse omSnapshotPurgeResponse2 = mock(OMSnapshotPurgeResponse.class); - @TempDir private File tempDir; private OzoneManagerDoubleBuffer.FlushNotifier flushNotifier; @@ -146,22 +143,19 @@ public void setup() throws IOException { doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse1).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse2).checkAndUpdateDB(any(), any()); - doNothing().when(omSnapshotPurgeResponse1).checkAndUpdateDB(any(), any()); - doNothing().when(omSnapshotPurgeResponse2).checkAndUpdateDB(any(), any()); when(omKeyResponse.getTraceID()).thenReturn("keyTraceId"); when(omBucketResponse.getTraceID()).thenReturn("bucketTraceId"); when(omSnapshotResponse1.getTraceID()).thenReturn("snapshotTraceId-1"); 
when(omSnapshotResponse2.getTraceID()).thenReturn("snapshotTraceId-2"); - when(omSnapshotPurgeResponseProto1.getTraceID()).thenReturn("snapshotPurgeTraceId-1"); - when(omSnapshotPurgeResponseProto2.getTraceID()).thenReturn("snapshotPurgeTraceId-2"); - - when(omKeyResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateKey); - when(omBucketResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateBucket); - when(omSnapshotPurgeResponseProto1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); - when(omSnapshotPurgeResponseProto2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); - when(omSnapshotResponse1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); - when(omSnapshotResponse2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotResponse1.hasCreateSnapshotResponse()) + .thenReturn(true); + when(omSnapshotResponse2.hasCreateSnapshotResponse()) + .thenReturn(true); + when(omSnapshotResponse1.getCreateSnapshotResponse()) + .thenReturn(snapshotResponse1); + when(omSnapshotResponse2.getCreateSnapshotResponse()) + .thenReturn(snapshotResponse2); when(omKeyCreateResponse.getOMResponse()).thenReturn(omKeyResponse); when(omBucketCreateResponse.getOMResponse()).thenReturn(omBucketResponse); @@ -169,10 +163,6 @@ public void setup() throws IOException { .thenReturn(omSnapshotResponse1); when(omSnapshotCreateResponse2.getOMResponse()) .thenReturn(omSnapshotResponse2); - when(omSnapshotPurgeResponse1.getOMResponse()) - .thenReturn(omSnapshotPurgeResponseProto1); - when(omSnapshotPurgeResponse2.getOMResponse()) - .thenReturn(omSnapshotPurgeResponseProto2); } @AfterEach @@ -204,35 +194,8 @@ private static Stream doubleBufferFlushCases() { omSnapshotCreateResponse1, omSnapshotCreateResponse2, omBucketCreateResponse), - 4L, 4L, 14L, 16L, 1L, 1.142F), - Arguments.of(Arrays.asList(omSnapshotPurgeResponse1, - omSnapshotPurgeResponse2), - 2L, 2L, 16L, 18L, 1L, 1.125F), - Arguments.of(Arrays.asList(omKeyCreateResponse, - omBucketCreateResponse, - omSnapshotPurgeResponse1, - omSnapshotPurgeResponse2), - 3L, 4L, 19L, 22L, 2L, 1.157F), - Arguments.of(Arrays.asList(omKeyCreateResponse, - omSnapshotPurgeResponse1, - omBucketCreateResponse, - omSnapshotPurgeResponse2), - 4L, 4L, 23L, 26L, 1L, 1.1300F), - Arguments.of(Arrays.asList(omKeyCreateResponse, - omSnapshotPurgeResponse1, - omSnapshotPurgeResponse2, - omBucketCreateResponse), - 4L, 4L, 27L, 30L, 1L, 1.111F), - Arguments.of(Arrays.asList(omKeyCreateResponse, - omBucketCreateResponse, - omSnapshotPurgeResponse1, - omSnapshotCreateResponse1, - omSnapshotPurgeResponse2, - omBucketCreateResponse, - omSnapshotCreateResponse2), - 6L, 7L, 33L, 37L, 2L, 1.121F) - - ); + 4L, 4L, 14L, 16L, 1L, 1.142F) + ); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index eff23a18e6e..c807c04688d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -30,11 +30,9 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.tuple.Pair; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -42,7 +40,6 @@ import org.apache.hadoop.hdds.client.ReplicationConfigValidator; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.ClientVersion; @@ -112,7 +109,6 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.logging.log4j.util.Strings; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; @@ -753,17 +749,17 @@ public static OMRequest.Builder newCreateBucketRequest( .setClientId(UUID.randomUUID().toString()); } - public static List< KeyValue> getMetadataList() { - List metadataList = new ArrayList<>(); - metadataList.add(KeyValue.newBuilder().setKey("key1").setValue( + public static List< HddsProtos.KeyValue> getMetadataList() { + List metadataList = new ArrayList<>(); + metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( "value1").build()); - metadataList.add(KeyValue.newBuilder().setKey("key2").setValue( + metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( "value2").build()); return metadataList; } - public static KeyValue fsoMetadata() { - return KeyValue.newBuilder() + public static HddsProtos.KeyValue fsoMetadata() { + return HddsProtos.KeyValue.newBuilder() .setKey(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS) .setValue(Boolean.FALSE.toString()) .build(); @@ -1054,7 +1050,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, .setMultipartNumber(partNumber) .setMultipartUploadID(multipartUploadID) .addAllKeyLocations(new ArrayList<>()) - .addMetadata(KeyValue.newBuilder() + .addMetadata(HddsProtos.KeyValue.newBuilder() .setKey(OzoneConsts.ETAG) .setValue(DatatypeConverter.printHexBinary( new DigestInputStream( @@ -1325,69 +1321,6 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } - public static OMRequest moveSnapshotTableKeyRequest(UUID snapshotId, - List>> deletedKeys, - List>> deletedDirs, - List> renameKeys) { - List deletedMoveKeys = new ArrayList<>(); - for (Pair> deletedKey : deletedKeys) { - OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = - OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() - .setKey(deletedKey.getKey()) - .addAllKeyInfos( - deletedKey.getValue().stream() - .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())) - .build(); - deletedMoveKeys.add(snapshotMoveKeyInfos); - } - - List deletedDirMoveKeys = new ArrayList<>(); - for (Pair> deletedKey : deletedDirs) { - OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = - OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() - .setKey(deletedKey.getKey()) - .addAllKeyInfos( - deletedKey.getValue().stream() - .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)) - .collect(Collectors.toList())) - .build(); - deletedDirMoveKeys.add(snapshotMoveKeyInfos); - } - - List renameKeyList = new ArrayList<>(); - for (Pair renameKey : renameKeys) { - KeyValue.Builder keyValue = KeyValue.newBuilder(); - keyValue.setKey(renameKey.getKey()); - if 
(!Strings.isBlank(renameKey.getValue())) { - keyValue.setValue(renameKey.getValue()); - } - renameKeyList.add(keyValue.build()); - } - - - OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest snapshotMoveTableKeysRequest = - OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest.newBuilder() - .setFromSnapshotID(HddsUtils.toProtobuf(snapshotId)) - .addAllDeletedKeys(deletedMoveKeys) - .addAllDeletedDirs(deletedDirMoveKeys) - .addAllRenamedKeys(renameKeyList) - .build(); - - OzoneManagerProtocolProtos.UserInfo userInfo = - OzoneManagerProtocolProtos.UserInfo.newBuilder() - .setUserName("user") - .setHostName("host") - .setRemoteAddress("remote-address") - .build(); - - return OMRequest.newBuilder() - .setSnapshotMoveTableKeysRequest(snapshotMoveTableKeysRequest) - .setCmdType(Type.SnapshotMoveTableKeys) - .setClientId(UUID.randomUUID().toString()) - .setUserInfo(userInfo) - .build(); - } - /** * Create OMRequest for Rename Snapshot. * diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 9eb8738b9d4..cbb782e184f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -26,23 +26,18 @@ import java.util.List; import java.util.UUID; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMDirectoriesPurgeResponseWithFSO; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; @@ -114,7 +109,7 @@ private void updateBlockInfo(OmKeyInfo omKeyInfo) throws IOException { * Create OMRequest which encapsulates DeleteKeyRequest. 
* @return OMRequest */ - private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDeletedDir, + private OMRequest createPurgeKeysRequest(String purgeDeletedDir, List keyList, OmBucketInfo bucketInfo) throws IOException { List purgePathRequestList = new ArrayList<>(); @@ -132,9 +127,7 @@ private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDelete OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); purgeDirRequest.addAllDeletedPath(purgePathRequestList); - if (fromSnapshot != null) { - purgeDirRequest.setSnapshotTableKey(fromSnapshot); - } + OzoneManagerProtocolProtos.OMRequest omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.PurgeDirectories) @@ -145,7 +138,8 @@ private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDelete } private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( final long volumeId, final long bucketId, final String purgeDeletedDir, - final List purgeDeletedFiles, final List markDirsAsDeleted) { + final List purgeDeletedFiles, + final List markDirsAsDeleted) { // Put all keys to be purged in a list OzoneManagerProtocolProtos.PurgePathRequest.Builder purgePathsRequest = OzoneManagerProtocolProtos.PurgePathRequest.newBuilder(); @@ -188,13 +182,13 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { // Create and Delete keys. The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); + List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest(null, + OMRequest omRequest = createPurgeKeysRequest( null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -211,59 +205,7 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(omMetadataManager, deletedKeyNames); - } - - @Test - public void testValidateAndUpdateCacheSnapshotLastTransactionInfoUpdated() throws Exception { - // Create and Delete keys. 
The keys should be moved to DeletedKeys table - List deletedKeyInfos = createAndDeleteKeys(1, null); - // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); - - String snapshotName = "snap1"; - SnapshotInfo snapshotInfo = createSnapshot(snapshotName); - ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() - .getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), snapshotInfo.getName()); - // Keys should be present in snapshot - validateDeletedKeysTable(rcOmSnapshot.get().getMetadataManager(), deletedKeyInfos, true); - // keys should have been moved from AOS - validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, false); - - // Create PurgeKeysRequest to purge the deleted keys - assertEquals(snapshotInfo.getLastTransactionInfo(), - TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( - bucketKey); - OMRequest omRequest = createPurgeKeysRequest(snapshotInfo.getTableKey(), - null, deletedKeyInfos, omBucketInfo); - OMRequest preExecutedRequest = preExecute(omRequest); - OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = - new OMDirectoriesPurgeRequestWithFSO(preExecutedRequest); - - assertEquals(1000L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); - OMDirectoriesPurgeResponseWithFSO omClientResponse - = (OMDirectoriesPurgeResponseWithFSO) omKeyPurgeRequest - .validateAndUpdateCache(ozoneManager, 100L); - - SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); - SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotInfo.getTableKey()); - - assertEquals(snapshotInfoOnDisk, snapshotInfo); - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) - .toByteString()); - assertEquals(snapshotInfo, updatedSnapshotInfo); - omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); - assertEquals(0L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); - - performBatchOperationCommit(omClientResponse); - - // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(rcOmSnapshot.get().getMetadataManager(), deletedKeyNames); - snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); - assertEquals(snapshotInfo, snapshotInfoOnDisk); - rcOmSnapshot.close(); + validateDeletedKeys(deletedKeyNames); } @Test @@ -272,13 +214,13 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() // Create and Delete keys. 
The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); + List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest(null, + OMRequest omRequest = createPurgeKeysRequest( null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -316,32 +258,35 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(omMetadataManager, deletedKeyNames); + validateDeletedKeys(deletedKeyNames); } - private void performBatchOperationCommit(OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { + private void performBatchOperationCommit( + OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + // Do manual commit and see whether addToBatch is successful or not. omMetadataManager.getStore().commitBatchOperation(batchOperation); } } @Nonnull - private List validateDeletedKeysTable(OMMetadataManager omMetadataManager, - List deletedKeyInfos, boolean keyExists) throws IOException { + private List validateDeletedKeysTable( + List deletedKeyInfos) throws IOException { List deletedKeyNames = new ArrayList<>(); for (OmKeyInfo deletedKey : deletedKeyInfos) { String keyName = omMetadataManager.getOzoneKey(deletedKey.getVolumeName(), deletedKey.getBucketName(), deletedKey.getKeyName()); - assertEquals(omMetadataManager.getDeletedTable().isExist(keyName), keyExists); + assertTrue(omMetadataManager.getDeletedTable().isExist(keyName)); deletedKeyNames.add(keyName); } return deletedKeyNames; } - private void validateDeletedKeys(OMMetadataManager omMetadataManager, + private void validateDeletedKeys( List deletedKeyNames) throws IOException { for (String deletedKey : deletedKeyNames) { assertTrue(omMetadataManager.getDeletedTable().isExist( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 1fc0cb6ebad..13f0191b29a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -56,8 +56,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; @@ -377,19 +375,10 @@ public void testValidateAndUpdateCacheWithUncommittedBlocks() } - /** - * In these scenarios below, OM should reject key commit with HSync requested from a client: - * 1. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = false - * 2. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = true - * 3. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = false - */ - @ParameterizedTest - @CsvSource({"false,false", "false,true", "true,false"}) - public void testRejectHsyncIfNotEnabled(boolean hbaseEnhancementsEnabled, boolean fsHsyncEnabled) throws Exception { + @Test + public void testRejectHsyncIfNotEnabled() throws Exception { OzoneConfiguration conf = ozoneManager.getConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, hbaseEnhancementsEnabled); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); - conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, fsHsyncEnabled); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, false); BucketLayout bucketLayout = getBucketLayout(); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -403,9 +392,6 @@ public void testRejectHsyncIfNotEnabled(boolean hbaseEnhancementsEnabled, boolea // Regular key commit should still work doKeyCommit(false, allocatedKeyLocationList.subList(0, 5)); - // Restore config after this test run - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index c323fecd501..a912f549b3c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,10 +23,12 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.junit.jupiter.api.Test; @@ -40,10 +42,12 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.when; /** * Tests {@link OMKeyPurgeRequest} and {@link OMKeyPurgeResponse}. 
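The hsync test in TestOMKeyCommitRequest above is reduced from a parameterized configuration matrix to a single flag flip. A minimal sketch of the flip-and-restore pattern it now uses (names taken from the surrounding hunk; the assertion on the rejected hsync commit is elided, and doKeyCommit is the helper already defined in that test class):

    OzoneConfiguration conf = ozoneManager.getConfiguration();
    conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, false);
    // An hsync-style commit is expected to be rejected here, while a regular commit still succeeds.
    doKeyCommit(false, allocatedKeyLocationList.subList(0, 5));
    // Restore the flag so later tests in the same JVM see the default behaviour.
    conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);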
@@ -111,6 +115,35 @@ private OMRequest createPurgeKeysRequest(List deletedKeys, .build(); } + /** + * Create snapshot and checkpoint directory. + */ + private SnapshotInfo createSnapshot(String snapshotName) throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + BatchOperation batchOperation = omMetadataManager.getStore() + .initBatchOperation(); + OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volumeName, bucketName, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + // Add to batch and commit to DB. + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + batchOperation.close(); + + String key = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + return snapshotInfo; + } + private OMRequest preExecute(OMRequest originalOmRequest) throws IOException { OMKeyPurgeRequest omKeyPurgeRequest = new OMKeyPurgeRequest(originalOmRequest); @@ -172,15 +205,22 @@ public void testKeyPurgeInSnapshot() throws Exception { List deletedKeyNames = createAndDeleteKeys(1, null); SnapshotInfo snapInfo = createSnapshot("snap1"); - assertEquals(snapInfo.getLastTransactionInfo(), - TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // The keys should be not present in the active Db's deletedTable for (String deletedKey : deletedKeyNames) { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); } - ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() - .getSnapshot(snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName()); + SnapshotInfo fromSnapshotInfo = new SnapshotInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setName("snap1") + .build(); + + ReferenceCounted rcOmSnapshot = + ozoneManager.getOmSnapshotManager().getSnapshot( + fromSnapshotInfo.getVolumeName(), + fromSnapshotInfo.getBucketName(), + fromSnapshotInfo.getName()); OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable @@ -199,12 +239,6 @@ public void testKeyPurgeInSnapshot() throws Exception { omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L); - SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); - SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); - assertEquals(snapshotInfoOnDisk, snapInfo); - snapInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) - .toByteString()); - assertEquals(snapInfo, updatedSnapshotInfo); OMResponse omResponse = OMResponse.newBuilder() .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) .setCmdType(Type.PurgeKeys) @@ -214,14 +248,14 @@ public void testKeyPurgeInSnapshot() throws Exception { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(omResponse, deletedKeyNames, snapInfo, null); + OMKeyPurgeResponse omKeyPurgeResponse = new 
OMKeyPurgeResponse( + omResponse, deletedKeyNames, fromSnapshotInfo, null); omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. omMetadataManager.getStore().commitBatchOperation(batchOperation); } - snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); - assertEquals(snapshotInfoOnDisk, snapInfo); + // The keys should not exist in the DeletedKeys table for (String deletedKey : deletedKeyNames) { assertFalse(omSnapshot.getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index e2219d5fcc1..c1b0e45e6d6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMPerformanceMetrics; @@ -44,15 +43,9 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; @@ -139,7 +132,6 @@ public void setup() throws Exception { folder.toAbsolutePath().toString()); ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, folder.toAbsolutePath().toString()); - ozoneConfiguration.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); ozoneConfiguration.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); @@ -244,7 +236,7 @@ public void setup() throws Exception { .thenReturn(bucket); when(ozoneManager.resolveBucketLink(any(Pair.class))) .thenReturn(bucket); - OmSnapshotManager omSnapshotManager = Mockito.spy(new OmSnapshotManager(ozoneManager)); + OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); when(ozoneManager.getOmSnapshotManager()) .thenReturn(omSnapshotManager); @@ -292,34 +284,4 @@ public void stop() { omMetrics.unRegister(); framework().clearInlineMocks(); } - - /** - * Create snapshot and checkpoint directory. 
- */ - protected SnapshotInfo createSnapshot(String snapshotName) throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volumeName, bucketName, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); - // Add to batch and commit to DB. - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); - - String key = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - return snapshotInfo; - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index af904382256..3997f39d7bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -19,8 +19,14 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -31,15 +37,18 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseWithFSO; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.util.UUID; @@ -55,19 +64,69 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static 
org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotCreateRequest class, which handles CreateSnapshot request. */ -public class TestOMSnapshotCreateRequest extends TestSnapshotRequestAndResponse { +public class TestOMSnapshotCreateRequest { + @TempDir + private File anotherTempDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + + private String volumeName; + private String bucketName; private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + anotherTempDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } } @ValueSource(strings = { @@ -80,9 +139,9 @@ public void setup() throws Exception { }) @ParameterizedTest public void testPreExecute(String snapshotName) throws Exception { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(volumeName, + bucketName, snapshotName); doPreExecute(omRequest); } @@ -98,9 +157,9 @@ public void testPreExecute(String snapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String snapshotName) { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(volumeName, + bucketName, snapshotName); OMException omException = assertThrows(OMException.class, () -> 
doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -110,8 +169,8 @@ public void testPreExecuteFailure(String snapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = createSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName1); + OMRequest omRequest = createSnapshotRequest(volumeName, + bucketName, snapshotName1); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -121,29 +180,29 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName1); + when(ozoneManager.isAdmin(any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(volumeName, + bucketName, snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); - String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); + String key = getTableKey(volumeName, bucketName, snapshotName1); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); // Run validateAndUpdateCache. 
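The bucket-usage assertion just above compares against the key's replicated size rather than its raw data size; with RATIS/THREE replication that is three copies of the data. A small sketch of that relationship, assuming the createOmKeyInfo helper used in this file and an explicit 1000-byte data size:

    OmKeyInfo key = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "k",
        RatisReplicationConfig.getInstance(THREE)).setDataSize(1000L).build();
    // Bucket usedBytes is accounted in replicated bytes: 3 replicas of 1000 bytes each.
    assertEquals(3 * 1000L, key.getReplicatedSize());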
OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); assertNotNull(omClientResponse.getOMResponse()); @@ -167,21 +226,20 @@ public void testValidateAndUpdateCache() throws Exception { // Get value from cache SnapshotInfo snapshotInfoInCache = - getOmMetadataManager().getSnapshotInfoTable().get(key); + omMetadataManager.getSnapshotInfoTable().get(key); assertNotNull(snapshotInfoInCache); assertEquals(snapshotInfoFromProto, snapshotInfoInCache); - assertEquals(snapshotInfoInCache.getLastTransactionInfo(), - TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); - assertEquals(0, getOmMetrics().getNumSnapshotCreateFails()); - assertEquals(1, getOmMetrics().getNumSnapshotActive()); - assertEquals(1, getOmMetrics().getNumSnapshotCreates()); + + assertEquals(0, omMetrics.getNumSnapshotCreateFails()); + assertEquals(1, omMetrics.getNumSnapshotActive()); + assertEquals(1, omMetrics.getNumSnapshotCreates()); } @Test public void testEntryRenamedKeyTable() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); Table snapshotRenamedTable = - getOmMetadataManager().getSnapshotRenamedTable(); + omMetadataManager.getSnapshotRenamedTable(); renameKey("key1", "key2", 0); renameDir("dir1", "dir2", 5); @@ -191,17 +249,17 @@ public void testEntryRenamedKeyTable() throws Exception { // Create snapshot createSnapshot(snapshotName1); - String snapKey = getTableKey(getVolumeName(), - getBucketName(), snapshotName1); + String snapKey = getTableKey(volumeName, + bucketName, snapshotName1); SnapshotInfo snapshotInfo = - getOmMetadataManager().getSnapshotInfoTable().get(snapKey); + omMetadataManager.getSnapshotInfoTable().get(snapKey); assertNotNull(snapshotInfo); renameKey("key3", "key4", 10); renameDir("dir3", "dir4", 15); // Rename table should have two entries as rename is within snapshot scope. 
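The count asserted on the next line reflects only the key3/dir3 renames performed after the first snapshot was created; the earlier key1/dir1 renames predate any snapshot and are not recorded. A minimal sketch of that check, assuming the countRowsInTable helper visible in this hunk:

    // Only renames made while a snapshot exists land in snapshotRenamedTable.
    Table<String, String> snapshotRenamedTable = omMetadataManager.getSnapshotRenamedTable();
    assertEquals(2, omMetadataManager.countRowsInTable(snapshotRenamedTable));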
- assertEquals(2, getOmMetadataManager() + assertEquals(2, omMetadataManager .countRowsInTable(snapshotRenamedTable)); // Create snapshot to clear snapshotRenamedTable @@ -211,33 +269,33 @@ public void testEntryRenamedKeyTable() throws Exception { @Test public void testEntryExists() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); - String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String key = getTableKey(volumeName, bucketName, snapshotName1); OMRequest omRequest = - createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); + createSnapshotRequest(volumeName, bucketName, snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(key)); // Now try to create again to verify error - omRequest = createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); + omRequest = createSnapshotRequest(volumeName, bucketName, snapshotName1); omSnapshotCreateRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getCreateSnapshotResponse()); assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, omResponse.getStatus()); - assertEquals(1, getOmMetrics().getNumSnapshotCreateFails()); - assertEquals(1, getOmMetrics().getNumSnapshotActive()); - assertEquals(2, getOmMetrics().getNumSnapshotCreates()); + assertEquals(1, omMetrics.getNumSnapshotCreateFails()); + assertEquals(1, omMetrics.getNumSnapshotActive()); + assertEquals(2, omMetrics.getNumSnapshotCreates()); } private void renameKey(String fromKey, String toKey, long offset) @@ -256,15 +314,15 @@ private void renameKey(String fromKey, String toKey, long offset) new OMKeyRenameResponse(omResponse, fromKeyInfo.getKeyName(), toKeyInfo.getKeyName(), toKeyInfo); - omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); - getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); + omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); - OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(getVolumeName(), - getBucketName(), fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, + bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) .setObjectID(100L) .build(); @@ -282,32 +340,32 @@ private void renameDir(String fromKey, String toKey, long offset) new OMKeyRenameResponseWithFSO(omResponse, getDBKeyName(fromKeyInfo), getDBKeyName(toKeyInfo), fromKeyParent, null, toKeyInfo, null, true, BucketLayout.FILE_SYSTEM_OPTIMIZED); - omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), 
getBatchOperation()); - getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); + omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } protected String getDBKeyName(OmKeyInfo keyInfo) throws IOException { - return getOmMetadataManager().getOzonePathKey( - getOmMetadataManager().getVolumeId(getVolumeName()), - getOmMetadataManager().getBucketId(getVolumeName(), getBucketName()), + return omMetadataManager.getOzonePathKey( + omMetadataManager.getVolumeId(volumeName), + omMetadataManager.getBucketId(volumeName, bucketName), keyInfo.getParentObjectID(), keyInfo.getKeyName()); } private void createSnapshot(String snapName) throws Exception { OMRequest omRequest = createSnapshotRequest( - getVolumeName(), getBucketName(), snapName); + volumeName, bucketName, snapName); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); //create entry OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); - omClientResponse.checkAndUpdateDB(getOmMetadataManager(), getBatchOperation()); - getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } private OMSnapshotCreateRequest doPreExecute( OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, getOzoneManager()); + return doPreExecute(originalRequest, ozoneManager); } /** @@ -324,15 +382,15 @@ public static OMSnapshotCreateRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - getOmMetadataManager()); - return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), + omMetadataManager); + return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 4c5dc2e77f0..5a8bb5d7c0d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -19,21 +19,33 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import 
org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.util.UUID; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; @@ -49,6 +61,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -56,15 +72,60 @@ * Mostly mirrors TestOMSnapshotCreateRequest. * testEntryNotExist() and testEntryExists() are unique. */ -public class TestOMSnapshotDeleteRequest extends TestSnapshotRequestAndResponse { +public class TestOMSnapshotDeleteRequest { + @TempDir + private File folder; + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + + private String volumeName; + private String bucketName; private String snapshotName; @BeforeEach public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + + OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); snapshotName = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB( + volumeName, bucketName, omMetadataManager); + } + @AfterEach + 
public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + } + + @ValueSource(strings = { // '-' is allowed. "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", @@ -75,9 +136,9 @@ public void setup() throws Exception { }) @ParameterizedTest public void testPreExecute(String deleteSnapshotName) throws Exception { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), - getBucketName(), deleteSnapshotName); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(volumeName, + bucketName, deleteSnapshotName); doPreExecute(omRequest); } @@ -93,9 +154,9 @@ public void testPreExecute(String deleteSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String deleteSnapshotName) { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), - getBucketName(), deleteSnapshotName); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(volumeName, + bucketName, deleteSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -105,8 +166,8 @@ public void testPreExecuteFailure(String deleteSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName); + OMRequest omRequest = deleteSnapshotRequest(volumeName, + bucketName, snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -116,27 +177,27 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); OMRequest omRequest = - deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); + deleteSnapshotRequest(volumeName, bucketName, snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); + String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); // As we have not still called validateAndUpdateCache, get() should // return null. 
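Because no snapshot-create request runs in this delete test, the expected entry is staged directly in the snapshot-info table cache rather than written to RocksDB. A minimal sketch of that seeding step, using the same cache API that appears a few lines below:

    SnapshotInfo info = SnapshotInfo.newInstance(volumeName, bucketName, snapshotName, null, Time.now());
    // Stage the entry in the table cache at epoch 1; nothing is flushed to the DB yet.
    omMetadataManager.getSnapshotInfoTable().addCacheEntry(
        new CacheKey<>(SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName)),
        CacheValue.get(1L, info));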
- assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); // add key to cache - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, snapshotName, null, Time.now()); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( + omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Trigger validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -146,14 +207,14 @@ public void testValidateAndUpdateCache() throws Exception { assertEquals(OK, omResponse.getStatus()); // check cache - snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); + snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, getOmMetrics().getNumSnapshotCreates()); + assertEquals(0, omMetrics.getNumSnapshotCreates()); // Expected -1 because no snapshot was created before. - assertEquals(-1, getOmMetrics().getNumSnapshotActive()); - assertEquals(1, getOmMetrics().getNumSnapshotDeleted()); - assertEquals(0, getOmMetrics().getNumSnapshotDeleteFails()); + assertEquals(-1, omMetrics.getNumSnapshotActive()); + assertEquals(1, omMetrics.getNumSnapshotDeleted()); + assertEquals(0, omMetrics.getNumSnapshotDeleteFails()); } /** @@ -161,25 +222,25 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testEntryNotExist() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); OMRequest omRequest = deleteSnapshotRequest( - getVolumeName(), getBucketName(), snapshotName); + volumeName, bucketName, snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); + String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); // Entry does not exist - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); // Trigger delete snapshot validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 1L); + omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 1L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); - assertEquals(0, getOmMetrics().getNumSnapshotActive()); - assertEquals(0, getOmMetrics().getNumSnapshotDeleted()); - assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); + assertEquals(0, omMetrics.getNumSnapshotActive()); + assertEquals(0, omMetrics.getNumSnapshotDeleted()); + assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); } /** @@ -188,50 +249,50 @@ public void testEntryNotExist() throws Exception { */ @Test public void testEntryExist() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); - String key = 
SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); + when(ozoneManager.isAdmin(any())).thenReturn(true); + String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); OMRequest omRequest1 = - createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); + createSnapshotRequest(volumeName, bucketName, snapshotName); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest1, getOzoneManager()); + TestOMSnapshotCreateRequest.doPreExecute(omRequest1, ozoneManager); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); // Create snapshot entry - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1L); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); SnapshotInfo snapshotInfo = - getOmMetadataManager().getSnapshotInfoTable().get(key); + omMetadataManager.getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, omMetrics.getNumSnapshotActive()); OMRequest omRequest2 = - deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); + deleteSnapshotRequest(volumeName, bucketName, snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest2); // Delete snapshot entry OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); // Response should be successful OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(OK, omResponse.getStatus()); - snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); + snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); // The snapshot entry should still exist in the table, // but marked as DELETED. assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); assertThat(snapshotInfo.getDeletionTime()).isGreaterThan(0L); - assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(0, omMetrics.getNumSnapshotActive()); // Now delete snapshot entry again, expect error. - omRequest2 = deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); + omRequest2 = deleteSnapshotRequest(volumeName, bucketName, snapshotName); omSnapshotDeleteRequest = doPreExecute(omRequest2); omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 3L); + omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 3L); omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -239,11 +300,11 @@ public void testEntryExist() throws Exception { assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); // Snapshot entry should still be there. 
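These create/delete tests all drive an OM write request through the same phases: preExecute, validateAndUpdateCache at a transaction index, and, when on-disk state is needed, a manual batch flush. A condensed sketch of that flow for a snapshot delete, assuming the mocks set up earlier in this file and a batch opened from the same metadata store:

    // 1. Normalize the incoming request.
    OMRequest modified = new OMSnapshotDeleteRequest(
        deleteSnapshotRequest(volumeName, bucketName, snapshotName)).preExecute(ozoneManager);
    // 2. Validate and update the table cache at transaction index 2.
    OMClientResponse response =
        new OMSnapshotDeleteRequest(modified).validateAndUpdateCache(ozoneManager, 2L);
    // 3. Optionally flush the staged changes to RocksDB through a batch.
    try (BatchOperation batch = omMetadataManager.getStore().initBatchOperation()) {
      response.checkAndUpdateDB(omMetadataManager, batch);
      omMetadataManager.getStore().commitBatchOperation(batch);
    }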
- snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); + snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, getOmMetrics().getNumSnapshotActive()); - assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); + assertEquals(0, omMetrics.getNumSnapshotActive()); + assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); } private OMSnapshotDeleteRequest doPreExecute( @@ -252,7 +313,7 @@ private OMSnapshotDeleteRequest doPreExecute( new OMSnapshotDeleteRequest(originalRequest); OMRequest modifiedRequest = - omSnapshotDeleteRequest.preExecute(getOzoneManager()); + omSnapshotDeleteRequest.preExecute(ozoneManager); return new OMSnapshotDeleteRequest(modifiedRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java deleted file mode 100644 index 247f322dfcf..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.snapshot; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; -import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; -import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.deleteSnapshotRequest; -import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.moveSnapshotTableKeyRequest; - -/** - * Class to test OmSnapshotMoveTableKeyRequest. 
- */ -public class TestOMSnapshotMoveTableKeysRequest extends TestSnapshotRequestAndResponse { - - private String snapshotName1; - private String snapshotName2; - private SnapshotInfo snapshotInfo1; - private SnapshotInfo snapshotInfo2; - - @BeforeEach - public void setup() throws Exception { - snapshotName1 = UUID.randomUUID().toString(); - snapshotName2 = UUID.randomUUID().toString(); - } - - public TestOMSnapshotMoveTableKeysRequest() { - super(true); - } - - private void createSnapshots(boolean createSecondSnapshot) throws Exception { - createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); - snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); - if (createSecondSnapshot) { - createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); - snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); - } - } - - private SnapshotInfo deleteSnapshot(SnapshotInfo snapshotInfo, long transactionIndex) throws Exception { - OzoneManagerProtocolProtos.OMRequest omRequest = deleteSnapshotRequest(snapshotInfo.getVolumeName(), - snapshotInfo.getBucketName(), snapshotInfo.getName()); - OMSnapshotDeleteRequest omSnapshotDeleteRequest = new OMSnapshotDeleteRequest(omRequest); - omSnapshotDeleteRequest.preExecute(getOzoneManager()); - omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), transactionIndex); - return SnapshotUtils.getSnapshotInfo(getOzoneManager(), snapshotInfo.getTableKey()); - } - - @Test - public void testValidateAndUpdateCacheWithNextSnapshotInactive() throws Exception { - createSnapshots(true); - snapshotInfo2 = deleteSnapshot(snapshotInfo2, 0); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( - omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); - Assertions.assertFalse(omClientResponse.getOMResponse().getSuccess()); - Assertions.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_SNAPSHOT_ERROR, - omClientResponse.getOMResponse().getStatus()); - } - - @Test - public void testPreExecuteWithInvalidDeletedKeyPrefix() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedKeys = - Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), - getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 10, 0)) - .flatMap(List::stream).collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - deletedKeys, Collections.emptyList(), Collections.emptyList()); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); - } - - 
@Test - public void testPreExecuteWithInvalidDeletedDirPrefix() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedDirs = - Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), - getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 1)) - .flatMap(List::stream).collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - Collections.emptyList(), deletedDirs, Collections.emptyList()); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); - } - - @Test - public void testPreExecuteWithInvalidNumberKeys() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedDirs = - Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), - getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 10)) - .flatMap(List::stream).collect(Collectors.toList()); - List>> deletedKeys = - Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), - getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 0, 0)) - .flatMap(List::stream).collect(Collectors.toList()); - List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); - renameKeys.add(Pair.of(getOmMetadataManager().getRenameKey(getVolumeName(), getBucketName(), 11), null)); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - deletedKeys, deletedDirs, renameKeys); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - omRequest = omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager()); - for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedDir : - omRequest.getSnapshotMoveTableKeysRequest().getDeletedDirsList()) { - Assertions.assertEquals(1, deletedDir.getKeyInfosList().size()); - } - - for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedKey : - omRequest.getSnapshotMoveTableKeysRequest().getDeletedKeysList()) { - Assertions.assertNotEquals(0, deletedKey.getKeyInfosList().size()); - } - - for (HddsProtos.KeyValue renameKey : omRequest.getSnapshotMoveTableKeysRequest().getRenamedKeysList()) { - Assertions.assertTrue(renameKey.hasKey() && renameKey.hasValue()); - } - - } - - @Test - public void testPreExecuteWithInvalidRenamePrefix() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List> renameKeys = - Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), - getRenameKeys(invalidVolumeName, invalidBucketName, 0, 10, snapshotName2)).flatMap(List::stream) - .collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = 
moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - Collections.emptyList(), Collections.emptyList(), renameKeys); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedKeys = getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0); - List>> deletedDirs = getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1); - List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - deletedKeys, deletedDirs, renameKeys); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - // perform preExecute. - omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( - omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); - Assertions.assertTrue(omClientResponse.getOMResponse().getSuccess()); - Assertions.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - } - - @Test - public void testPreExecuteWithInvalidDuplicateDeletedKey() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedKeys = - Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), - getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0)).flatMap(List::stream) - .collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - deletedKeys, Collections.emptyList(), Collections.emptyList()); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); - } - - @Test - public void testPreExecuteWithInvalidDuplicateDeletedDir() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List>> deletedDirs = - Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), - getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1)).flatMap(List::stream) - .collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - Collections.emptyList(), deletedDirs, Collections.emptyList()); - OMSnapshotMoveTableKeysRequest 
omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); - } - - @Test - public void testPreExecuteWithInvalidDuplicateRenameKey() throws Exception { - createSnapshots(true); - String invalidVolumeName = UUID.randomUUID().toString(); - String invalidBucketName = UUID.randomUUID().toString(); - addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); - List> renameKeys = - Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), - getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1)) - .flatMap(List::stream).collect(Collectors.toList()); - OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), - Collections.emptyList(), Collections.emptyList(), renameKeys); - OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); - OMException omException = Assertions.assertThrows(OMException.class, - () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); - Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 1c44decdfda..8edd096e766 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -19,32 +19,44 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; @@ -56,8 +68,10 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -65,16 +79,49 @@ /** * Tests OMSnapshotPurgeRequest class. */ -public class TestOMSnapshotPurgeRequestAndResponse extends TestSnapshotRequestAndResponse { - private final List<Path> checkpointPaths = new ArrayList<>(); - private String keyName; +public class TestOMSnapshotPurgeRequestAndResponse { + private List<Path> checkpointPaths = new ArrayList<>(); - public TestOMSnapshotPurgeRequestAndResponse() { - super(true); - } + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OMMetadataManager omMetadataManager; + private OmSnapshotManager omSnapshotManager; + private AuditLogger auditLogger; + + private String volumeName; + private String bucketName; + private String keyName; @BeforeEach - public void setup() throws Exception { + void setup(@TempDir File testDir) throws Exception { + ozoneManager = mock(OzoneManager.class); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + testDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + when(ozoneManager.isAdmin(any())).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + + ReferenceCounted<IOmMetadataReader> rcOmMetadataReader = + mock(ReferenceCounted.class); + when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); + omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); } @@ -85,14 +132,17 
@@ private List<String> createSnapshots(int numSnapshotKeys) throws Exception { Random random = new Random(); + // Add volume, bucket and key entries to OM DB. + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); // Create Snapshot and CheckpointDir List<String> purgeSnapshots = new ArrayList<>(numSnapshotKeys); for (int i = 1; i <= numSnapshotKeys; i++) { String snapshotName = keyName + "-" + random.nextLong(); createSnapshotCheckpoint(snapshotName); - purgeSnapshots.add(SnapshotInfo.getTableKey(getVolumeName(), - getBucketName(), snapshotName)); + purgeSnapshots.add(SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName)); } return purgeSnapshots; @@ -122,7 +172,39 @@ private OMRequest createPurgeKeysRequest(List<String> purgeSnapshotKeys) { * Create snapshot and checkpoint directory. */ private void createSnapshotCheckpoint(String snapshotName) throws Exception { - checkpointPaths.add(createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName)); + createSnapshotCheckpoint(volumeName, bucketName, snapshotName); + } + + private void createSnapshotCheckpoint(String volume, + String bucket, + String snapshotName) throws Exception { + OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volume, bucket, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + // Add to batch and commit to DB. + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } + + String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + snapshotInfo.getCheckpointDir()); + // Check the DB is still there + assertTrue(Files.exists(snapshotDirPath)); + checkpointPaths.add(snapshotDirPath); } private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) @@ -130,7 +212,7 @@ private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) OMSnapshotPurgeRequest omSnapshotPurgeRequest = new OMSnapshotPurgeRequest(originalOmRequest); OMRequest modifiedOmRequest = omSnapshotPurgeRequest - .preExecute(getOzoneManager()); + .preExecute(ozoneManager); return new OMSnapshotPurgeRequest(modifiedOmRequest); } @@ -142,48 +224,48 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) // validateAndUpdateCache for OMSnapshotPurgeRequest. OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); // Commit to DB. 
- try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); - getOmMetadataManager().getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } } @Test public void testValidateAndUpdateCache() throws Exception { - long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); - assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); for (String snapshotTableKey: snapshotDbKeysToPurge) { - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); } - try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); - getOmMetadataManager().getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } // Check if the entries are deleted. - assertTrue(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); // Check if all the checkpoints are cleared. 
for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } - assertEquals(initialSnapshotPurgeCount + 1, getOmMetrics().getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount, getOmMetrics().getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); } /** @@ -191,8 +273,8 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testValidateAndUpdateCacheFailure() throws Exception { - long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); @@ -201,17 +283,17 @@ public void testValidateAndUpdateCacheFailure() throws Exception { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); - assertEquals(initialSnapshotPurgeCount, getOmMetrics().getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount + 1, getOmMetrics().getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after @@ -224,7 +306,7 @@ public void testSnapshotChainCleanup(int index) throws Exception { // Before purge, check snapshot chain OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) getOmMetadataManager(); + (OmMetadataManagerImpl) omMetadataManager; SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); SnapshotInfo snapInfo = metadataManager.getSnapshotInfoTable() @@ -258,8 +340,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { snapInfo.getSnapshotId()); } - long rowsInTableBeforePurge = getOmMetadataManager() - .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); + long rowsInTableBeforePurge = omMetadataManager + .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); // Purge Snapshot of the given index. 
List toPurgeList = Collections.singletonList(snapShotToPurge); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -282,8 +364,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { .getGlobalPreviousSnapshotId(), prevGlobalSnapId); } - assertNotEquals(rowsInTableBeforePurge, getOmMetadataManager() - .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable())); + assertNotEquals(rowsInTableBeforePurge, omMetadataManager + .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); } private static Stream snapshotPurgeCases() { @@ -337,14 +419,14 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int toIndex, boolean createInBucketOrder) throws Exception { SnapshotChainManager chainManager = - ((OmMetadataManagerImpl) getOmMetadataManager()).getSnapshotChainManager(); + ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager(); int totalKeys = numberOfBuckets * numberOfKeysPerBucket; List buckets = new ArrayList<>(); for (int i = 0; i < numberOfBuckets; i++) { String bucketNameLocal = "bucket-" + UUID.randomUUID(); - OMRequestTestUtils.addVolumeAndBucketToDB(getVolumeName(), bucketNameLocal, - getOmMetadataManager()); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketNameLocal, + omMetadataManager); buckets.add(bucketNameLocal); } @@ -355,43 +437,26 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int bucketIndex = createInBucketOrder ? i : j; String bucket = buckets.get(bucketIndex % numberOfBuckets); String snapshotName = UUID.randomUUID().toString(); - createSnapshotCheckpoint(getVolumeName(), bucket, snapshotName); + createSnapshotCheckpoint(volumeName, bucket, snapshotName); String snapshotTableKey = - SnapshotInfo.getTableKey(getVolumeName(), bucket, snapshotName); + SnapshotInfo.getTableKey(volumeName, bucket, snapshotName); SnapshotInfo snapshotInfo = - getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey); + omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); snapshotInfoList.add(snapshotInfo); } } - long numberOfSnapshotBeforePurge = getOmMetadataManager() - .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); + long numberOfSnapshotBeforePurge = omMetadataManager + .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); assertEquals(totalKeys, numberOfSnapshotBeforePurge); assertEquals(totalKeys, chainManager.getGlobalSnapshotChain().size()); - Map expectedTransactionInfos = new HashMap<>(); - // Ratis transaction uses term index 1 while creating snapshot. - ByteString expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)) - .toByteString(); - for (SnapshotInfo snapshotInfo : snapshotInfoList) { - expectedTransactionInfos.put(snapshotInfo.getSnapshotId(), expectedLastTransactionVal); - } - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList, expectedTransactionInfos); - // Ratis transaction uses term index 200 while purging snapshot. - expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L)) - .toByteString(); + + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList); + List purgeSnapshotKeys = new ArrayList<>(); for (int i = fromIndex; i <= toIndex; i++) { SnapshotInfo purgeSnapshotInfo = snapshotInfoList.get(i); - UUID snapId = purgeSnapshotInfo.getSnapshotId(); - // expecting nextPathSnapshot & nextGlobalSnapshot in chain gets updated. 
- if (chainManager.hasNextGlobalSnapshot(snapId)) { - expectedTransactionInfos.put(chainManager.nextGlobalSnapshot(snapId), expectedLastTransactionVal); - } - if (chainManager.hasNextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId)) { - expectedTransactionInfos.put(chainManager.nextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId), - expectedLastTransactionVal); - } - String purgeSnapshotKey = SnapshotInfo.getTableKey(getVolumeName(), + String purgeSnapshotKey = SnapshotInfo.getTableKey(volumeName, purgeSnapshotInfo.getBucketName(), purgeSnapshotInfo.getName()); purgeSnapshotKeys.add(purgeSnapshotKey); @@ -404,34 +469,34 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( for (int i = 0; i < totalKeys; i++) { if (i < fromIndex || i > toIndex) { SnapshotInfo info = snapshotInfoList.get(i); - String snapshotKey = SnapshotInfo.getTableKey(getVolumeName(), + String snapshotKey = SnapshotInfo.getTableKey(volumeName, info.getBucketName(), info.getName()); snapshotInfoListAfterPurge.add( - getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); + omMetadataManager.getSnapshotInfoTable().get(snapshotKey)); } } long expectNumberOfSnapshotAfterPurge = totalKeys - (toIndex - fromIndex + 1); - long actualNumberOfSnapshotAfterPurge = getOmMetadataManager() - .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); + long actualNumberOfSnapshotAfterPurge = omMetadataManager + .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); assertEquals(expectNumberOfSnapshotAfterPurge, actualNumberOfSnapshotAfterPurge); assertEquals(expectNumberOfSnapshotAfterPurge, chainManager .getGlobalSnapshotChain().size()); - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoListAfterPurge, expectedTransactionInfos); + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( + snapshotInfoListAfterPurge); } private void validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( - List snapshotInfoList, Map expectedTransactionInfos) throws IOException { + List snapshotInfoList + ) throws IOException { if (snapshotInfoList.isEmpty()) { return; } - for (SnapshotInfo snapshotInfo : snapshotInfoList) { - assertEquals(snapshotInfo.getLastTransactionInfo(), expectedTransactionInfos.get(snapshotInfo.getSnapshotId())); - } + OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) getOmMetadataManager(); + (OmMetadataManagerImpl) omMetadataManager; SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java index a746597288a..ab2bac1bd0e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -17,8 +17,17 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import 
org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -26,14 +35,17 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; @@ -50,19 +62,75 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. */ -public class TestOMSnapshotRenameRequest extends TestSnapshotRequestAndResponse { +public class TestOMSnapshotRenameRequest { + + @TempDir + private File anotherTempDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + + private String volumeName; + private String bucketName; private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + anotherTempDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + anotherTempDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = 
omMetadataManager.getStore().initBatchOperation(); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } } @ValueSource(strings = { @@ -75,11 +143,11 @@ public void setup() throws Exception { }) @ParameterizedTest public void testPreExecute(String toSnapshotName) throws Exception { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), - getBucketName(), currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); doPreExecute(omRequest); } @@ -99,10 +167,10 @@ public void testPreExecute(String toSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String toSnapshotName) { - when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + when(ozoneManager.isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), - getBucketName(), currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage().contains("Invalid snapshot name: " + toSnapshotName)); @@ -111,8 +179,8 @@ public void testPreExecuteFailure(String toSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. 
- OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName1, snapshotName2); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -122,39 +190,39 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), - getBucketName(), snapshotName1, snapshotName2); + when(ozoneManager.isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); - String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); - String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); + String key = getTableKey(volumeName, bucketName, snapshotName1); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); // Add key to cache. - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, snapshotName1, UUID.randomUUID(), Time.now()); snapshotInfo.setReferencedSize(1000L); snapshotInfo.setReferencedReplicatedSize(3 * 1000L); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( + omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Run validateAndUpdateCache. 
OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 2L); + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); assertNotNull(omClientResponse.getOMResponse()); @@ -176,56 +244,56 @@ public void testValidateAndUpdateCache() throws Exception { SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); - String key2 = getTableKey(getVolumeName(), getBucketName(), snapshotName2); + String key2 = getTableKey(volumeName, bucketName, snapshotName2); // Get value from cache SnapshotInfo snapshotInfoNewInCache = - getOmMetadataManager().getSnapshotInfoTable().get(key2); + omMetadataManager.getSnapshotInfoTable().get(key2); assertNotNull(snapshotInfoNewInCache); assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); SnapshotInfo snapshotInfoOldInCache = - getOmMetadataManager().getSnapshotInfoTable().get(key); + omMetadataManager.getSnapshotInfoTable().get(key); assertNull(snapshotInfoOldInCache); } @Test public void testEntryExists() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); - String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); // First make sure we have two snapshots. OzoneManagerProtocolProtos.OMRequest createOmRequest = - createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); + createSnapshotRequest(volumeName, bucketName, snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); createOmRequest = - createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName2); + createSnapshotRequest(volumeName, bucketName, snapshotName2); omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); - omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); - assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); - assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -235,24 +303,24 @@ public void testEntryExists() throws Exception { @Test public void testEntryNotFound() throws Exception { - when(getOzoneManager().isAdmin(any())).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); - String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); - assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -262,7 +330,7 @@ public void testEntryNotFound() throws Exception { private OMSnapshotRenameRequest doPreExecute( OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, getOzoneManager()); + return doPreExecute(originalRequest, ozoneManager); } public static OMSnapshotRenameRequest doPreExecute( @@ -276,15 +344,15 @@ public static OMSnapshotRenameRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - getOmMetadataManager()); - return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), + omMetadataManager); + return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 380922f9e22..b5bfc2714b0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -18,23 +18,32 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; -import 
org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -51,13 +60,37 @@ * Tests TestOMSnapshotSetPropertyRequest * TestOMSnapshotSetPropertyResponse class. */ -public class TestOMSnapshotSetPropertyRequestAndResponse extends TestSnapshotRequestAndResponse { +public class TestOMSnapshotSetPropertyRequestAndResponse { + private BatchOperation batchOperation; + private OzoneManager ozoneManager; + private OMMetadataManager omMetadataManager; + private OMMetrics omMetrics; + private String volumeName; + private String bucketName; private String snapName; private long exclusiveSize; private long exclusiveSizeAfterRepl; @BeforeEach - void setup() { + void setup(@TempDir File testDir) throws Exception { + omMetrics = OMMetrics.create(); + ozoneManager = mock(OzoneManager.class); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + testDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); snapName = UUID.randomUUID().toString(); exclusiveSize = 2000L; exclusiveSizeAfterRepl = 6000L; @@ -65,11 +98,11 @@ void setup() { @Test public void testValidateAndUpdateCache() throws IOException { - long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); @@ -78,27 +111,28 @@ public void testValidateAndUpdateCache() throws IOException { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(request); OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest - .preExecute(getOzoneManager()); + .preExecute(ozoneManager); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) omSnapshotSetPropertyRequest - 
.validateAndUpdateCache(getOzoneManager(), 200L); + .validateAndUpdateCache(ozoneManager, 200L); // Commit to DB. - omSnapshotSetPropertyResponse.checkAndUpdateDB(getOmMetadataManager(), - getBatchOperation()); - getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + omSnapshotSetPropertyResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); } assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), - getOmMetrics().getNumSnapshotSetProperties()); - assertEquals(initialSnapshotSetPropertyFailCount, getOmMetrics().getNumSnapshotSetPropertyFails()); + omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> - iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { + iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); assertCacheValues(snapshotEntry.getKey()); @@ -115,11 +149,11 @@ public void testValidateAndUpdateCache() throws IOException { */ @Test public void testValidateAndUpdateCacheFailure() throws IOException { - long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); @@ -127,27 +161,27 @@ public void testValidateAndUpdateCacheFailure() throws IOException { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); for (OMRequest omRequest: snapshotUpdateSizeRequests) { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); - OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(getOzoneManager()); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) - omSnapshotSetPropertyRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); } - assertEquals(initialSnapshotSetPropertyCount, getOmMetrics().getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), - 
getOmMetrics().getNumSnapshotSetPropertyFails()); + omMetrics.getNumSnapshotSetPropertyFails()); } private void assertCacheValues(String dbKey) { - CacheValue cacheValue = getOmMetadataManager() + CacheValue cacheValue = omMetadataManager .getSnapshotInfoTable() .getCacheValue(new CacheKey<>(dbKey)); assertEquals(exclusiveSize, cacheValue.getCacheValue().getExclusiveSize()); @@ -159,7 +193,7 @@ private List createSnapshotUpdateSizeRequest() throws IOException { List omRequests = new ArrayList<>(); try (TableIterator> - iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { + iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); SnapshotSize snapshotSize = SnapshotSize.newBuilder() @@ -186,8 +220,8 @@ private List createSnapshotUpdateSizeRequest() private void createSnapshotDataForTest() throws IOException { // Create 10 Snapshots for (int i = 0; i < 10; i++) { - OMRequestTestUtils.addSnapshotToTableCache(getVolumeName(), getBucketName(), - snapName + i, getOmMetadataManager()); + OMRequestTestUtils.addSnapshotToTableCache(volumeName, bucketName, + snapName + i, omMetadataManager); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index a370c20ad1b..7f74f3d17ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -27,7 +27,6 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -99,8 +98,7 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception { snapshotName, snapshotId, Time.now()); - snapshotInfo.setLastTransactionInfo( - TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); + // confirm table is empty assertEquals(0, omMetadataManager .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java deleted file mode 100644 index d2e2d94ec73..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om.response.snapshot; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.ClientVersion; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.LongStream; - -/** - * Test class to test OMSnapshotMoveTableKeysResponse. 
- */ -public class TestOMSnapshotMoveTableKeysResponse extends TestSnapshotRequestAndResponse { - - private String snapshotName1; - private String snapshotName2; - private SnapshotInfo snapshotInfo1; - private SnapshotInfo snapshotInfo2; - - @BeforeEach - public void setup() throws Exception { - snapshotName1 = UUID.randomUUID().toString(); - snapshotName2 = UUID.randomUUID().toString(); - } - - public TestOMSnapshotMoveTableKeysResponse() { - super(true); - } - - private void createSnapshots(boolean createSecondSnapshot) throws Exception { - addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 0, - 10, snapshotName1)); - addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 0, - 10, 10, 0).stream() - .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) - .collect(Collectors.toList())); - addDataToTable(getOmMetadataManager().getDeletedDirTable(), - getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1).stream() - .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); - createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); - snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); - addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 5, - 15, snapshotName2)); - addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 5, - 8, 10, 10).stream() - .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) - .collect(Collectors.toList())); - addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 8, - 15, 10, 0).stream() - .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) - .collect(Collectors.toList())); - addDataToTable(getOmMetadataManager().getDeletedDirTable(), - getDeletedDirKeys(getVolumeName(), getBucketName(), 5, 15, 1).stream() - .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); - if (createSecondSnapshot) { - createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); - snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); - } - } - - private void addDataToTable(Table table, List> vals) throws IOException { - for (Pair pair : vals) { - table.put(pair.getKey(), pair.getValue()); - } - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws Exception { - createSnapshots(nextSnapshotExists); - - try (ReferenceCounted snapshot1 = getOmSnapshotManager().getSnapshot(getVolumeName(), getBucketName(), - snapshotName1); - ReferenceCounted snapshot2 = nextSnapshotExists ? 
getOmSnapshotManager().getSnapshot( - getVolumeName(), getBucketName(), snapshotName2) : null) { - OmSnapshot snapshot = snapshot1.get(); - List deletedTable = new ArrayList<>(); - List deletedDirTable = new ArrayList<>(); - List renamedTable = new ArrayList<>(); - Map renameEntries = new HashMap<>(); - snapshot.getMetadataManager().getDeletedTable().iterator() - .forEachRemaining(entry -> { - try { - deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) - .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf( - ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build()); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - - snapshot.getMetadataManager().getDeletedDirTable().iterator() - .forEachRemaining(entry -> { - try { - deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) - .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { - try { - renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()); - renameEntries.put(entry.getKey(), entry.getValue()); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - OMSnapshotMoveTableKeysResponse response = new OMSnapshotMoveTableKeysResponse( - OzoneManagerProtocolProtos.OMResponse.newBuilder().setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.SnapshotMoveTableKeys).build(), - snapshotInfo1, nextSnapshotExists ? snapshotInfo2 : null, deletedTable, deletedDirTable, renamedTable); - try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { - response.addToDBBatch(getOmMetadataManager(), batchOperation); - getOmMetadataManager().getStore().commitBatchOperation(batchOperation); - } - Assertions.assertTrue(snapshot.getMetadataManager().getDeletedTable().isEmpty()); - Assertions.assertTrue(snapshot.getMetadataManager().getDeletedDirTable().isEmpty()); - Assertions.assertTrue(snapshot.getMetadataManager().getSnapshotRenamedTable().isEmpty()); - OMMetadataManager nextMetadataManager = - nextSnapshotExists ? snapshot2.get().getMetadataManager() : getOmMetadataManager(); - AtomicInteger count = new AtomicInteger(); - nextMetadataManager.getDeletedTable().iterator().forEachRemaining(entry -> { - count.getAndIncrement(); - try { - int maxCount = count.get() >= 6 && count.get() <= 8 ? 
20 : 10; - Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size()); - List versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions) - .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList()); - List expectedVersions = new ArrayList<>(); - if (maxCount == 20) { - expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList())); - } - expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList())); - Assertions.assertEquals(expectedVersions, versions); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - Assertions.assertEquals(15, count.get()); - count.set(0); - - nextMetadataManager.getDeletedDirTable().iterator().forEachRemaining(entry -> count.getAndIncrement()); - Assertions.assertEquals(15, count.get()); - count.set(0); - nextMetadataManager.getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { - try { - String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue()); - Assertions.assertEquals(expectedValue, entry.getValue()); - } catch (IOException e) { - throw new RuntimeException(e); - } - count.getAndIncrement(); - }); - Assertions.assertEquals(15, count.get()); - } - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index ff6506da034..8163592cfc6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -39,17 +39,13 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.KeyManager; -import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.PendingKeysDeletion; import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -61,13 +57,10 @@ import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,16 +81,12 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; -import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.when; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -143,7 +132,6 @@ private void createConfig(File testDir) { 1, TimeUnit.SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setQuietMode(false); } @@ -297,115 +285,6 @@ void checkDeletedTableCleanUpForSnapshot() throws Exception { assertEquals(0, rangeKVs.size()); } - /* - * Create key k1 - * Create snap1 - * Rename k1 to k2 - * Delete k2 - * Wait for KeyDeletingService to start processing deleted key k2 - * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed - * Resume KeyDeletingService thread. - * Read k1 from snap1. - */ - @Test - public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() - throws Exception { - Table snapshotInfoTable = - om.getMetadataManager().getSnapshotInfoTable(); - Table deletedTable = - om.getMetadataManager().getDeletedTable(); - Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); - - // Suspend KeyDeletingService - keyDeletingService.suspend(); - SnapshotDeletingService snapshotDeletingService = om.getKeyManager().getSnapshotDeletingService(); - snapshotDeletingService.suspend(); - GenericTestUtils.waitFor(() -> !keyDeletingService.isRunningOnAOS(), 1000, 10000); - final String volumeName = getTestName(); - final String bucketName = uniqueObjectName("bucket"); - OzoneManager ozoneManager = Mockito.spy(om); - OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); - KeyManager km = Mockito.spy(new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), conf, - om.getPerfMetrics())); - when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> { - return omSnapshotManager; - }); - KeyDeletingService service = new KeyDeletingService(ozoneManager, scmBlockTestingClient, km, 10000, - 100000, conf, false); - service.shutdown(); - final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); - final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); - final long initialRenameCount = metadataManager.countRowsInTable(renameTable); - // Create Volume and Buckets - createVolumeAndBucket(volumeName, bucketName, false); - OmKeyArgs args = createAndCommitKey(volumeName, bucketName, - "key1", 3); - String snap1 = uniqueObjectName("snap"); - String snap2 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap1); - KeyInfoWithVolumeContext keyInfo = writeClient.getKeyInfo(args, false); - AtomicLong objectId = new AtomicLong(keyInfo.getKeyInfo().getObjectID()); - renameKey(volumeName, bucketName, "key1", "key2"); - deleteKey(volumeName, bucketName, "key2"); - assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); - assertTableRowCount(renameTable, initialRenameCount + 1, metadataManager); - - String[] 
deletePathKey = {metadataManager.getOzoneDeletePathKey(objectId.get(), - metadataManager.getOzoneKey(volumeName, - bucketName, "key2"))}; - assertNotNull(deletedTable.get(deletePathKey[0])); - Mockito.doAnswer(i -> { - writeClient.createSnapshot(volumeName, bucketName, snap2); - GenericTestUtils.waitFor(() -> { - try { - SnapshotInfo snapshotInfo = writeClient.getSnapshotInfo(volumeName, bucketName, snap2); - return OmSnapshotManager.areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); - } catch (IOException e) { - throw new RuntimeException(e); - } - }, 1000, 100000); - GenericTestUtils.waitFor(() -> { - try { - return renameTable.get(metadataManager.getRenameKey(volumeName, bucketName, objectId.get())) == null; - } catch (IOException e) { - throw new RuntimeException(e); - } - }, 1000, 10000); - return i.callRealMethod(); - }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(volumeName), ArgumentMatchers.eq(bucketName), - ArgumentMatchers.eq(snap1)); - assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); - doAnswer(i -> { - PendingKeysDeletion pendingKeysDeletion = (PendingKeysDeletion) i.callRealMethod(); - for (BlockGroup group : pendingKeysDeletion.getKeyBlocksList()) { - Assertions.assertNotEquals(deletePathKey[0], group.getGroupID()); - } - return pendingKeysDeletion; - }).when(km).getPendingDeletionKeys(anyInt()); - service.runPeriodicalTaskNow(); - service.runPeriodicalTaskNow(); - assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); - // Create Key3 - OmKeyArgs args2 = createAndCommitKey(volumeName, bucketName, - "key3", 3); - keyInfo = writeClient.getKeyInfo(args2, false); - objectId.set(keyInfo.getKeyInfo().getObjectID()); - // Rename Key3 to key4 - renameKey(volumeName, bucketName, "key3", "key4"); - // Delete Key4 - deleteKey(volumeName, bucketName, "key4"); - deletePathKey[0] = metadataManager.getOzoneDeletePathKey(objectId.get(), metadataManager.getOzoneKey(volumeName, - bucketName, "key4")); - // Delete snapshot - writeClient.deleteSnapshot(volumeName, bucketName, snap2); - // Run KDS and ensure key4 doesn't get purged since snap2 has not been deleted. 
- service.runPeriodicalTaskNow(); - writeClient.deleteSnapshot(volumeName, bucketName, snap1); - snapshotDeletingService.resume(); - assertTableRowCount(snapshotInfoTable, initialSnapshotCount, metadataManager); - keyDeletingService.resume(); - } - /* * Create Snap1 * Create 10 keys @@ -517,68 +396,68 @@ void testSnapshotExclusiveSize() throws Exception { final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); final long initialRenamedCount = metadataManager.countRowsInTable(renamedTable); - final String testVolumeName = getTestName(); - final String testBucketName = uniqueObjectName("bucket"); + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); final String keyName = uniqueObjectName("key"); // Create Volume and Buckets - createVolumeAndBucket(testVolumeName, testBucketName, false); + createVolumeAndBucket(volumeName, bucketName, false); // Create 3 keys for (int i = 1; i <= 3; i++) { - createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); + createAndCommitKey(volumeName, bucketName, keyName + i, 3); } assertTableRowCount(keyTable, initialKeyCount + 3, metadataManager); // Create Snapshot1 String snap1 = uniqueObjectName("snap"); - writeClient.createSnapshot(testVolumeName, testBucketName, snap1); + writeClient.createSnapshot(volumeName, bucketName, snap1); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 4; i <= 5; i++) { - createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); + createAndCommitKey(volumeName, bucketName, keyName + i, 3); } // Delete a key, rename 2 keys. We will be using this to test // how we handle renamed key for exclusive size calculation. 
- renameKey(testVolumeName, testBucketName, keyName + 1, "renamedKey1"); - renameKey(testVolumeName, testBucketName, keyName + 2, "renamedKey2"); - deleteKey(testVolumeName, testBucketName, keyName + 3); + renameKey(volumeName, bucketName, keyName + 1, "renamedKey1"); + renameKey(volumeName, bucketName, keyName + 2, "renamedKey2"); + deleteKey(volumeName, bucketName, keyName + 3); assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 2, metadataManager); // Create Snapshot2 String snap2 = uniqueObjectName("snap"); - writeClient.createSnapshot(testVolumeName, testBucketName, snap2); + writeClient.createSnapshot(volumeName, bucketName, snap2); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 6; i <= 7; i++) { - createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); + createAndCommitKey(volumeName, bucketName, keyName + i, 3); } - deleteKey(testVolumeName, testBucketName, "renamedKey1"); - deleteKey(testVolumeName, testBucketName, keyName + 4); + deleteKey(volumeName, bucketName, "renamedKey1"); + deleteKey(volumeName, bucketName, keyName + 4); // Do a second rename of already renamedKey2 - renameKey(testVolumeName, testBucketName, "renamedKey2", "renamedKey22"); + renameKey(volumeName, bucketName, "renamedKey2", "renamedKey22"); assertTableRowCount(deletedTable, initialDeletedCount + 2, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 1, metadataManager); // Create Snapshot3 String snap3 = uniqueObjectName("snap"); - writeClient.createSnapshot(testVolumeName, testBucketName, snap3); + writeClient.createSnapshot(volumeName, bucketName, snap3); // Delete 4 keys - deleteKey(testVolumeName, testBucketName, "renamedKey22"); + deleteKey(volumeName, bucketName, "renamedKey22"); for (int i = 5; i <= 7; i++) { - deleteKey(testVolumeName, testBucketName, keyName + i); + deleteKey(volumeName, bucketName, keyName + i); } // Create Snapshot4 String snap4 = uniqueObjectName("snap"); - writeClient.createSnapshot(testVolumeName, testBucketName, snap4); - createAndCommitKey(testVolumeName, testBucketName, uniqueObjectName("key"), 3); + writeClient.createSnapshot(volumeName, bucketName, snap4); + createAndCommitKey(volumeName, bucketName, uniqueObjectName("key"), 3); long prevKdsRunCount = getRunCount(); keyDeletingService.resume(); @@ -589,7 +468,6 @@ void testSnapshotExclusiveSize() throws Exception { .put(snap3, 2000L) .put(snap4, 0L) .build(); - System.out.println(expectedSize); // Let KeyDeletingService to run for some iterations GenericTestUtils.waitFor( @@ -602,10 +480,8 @@ void testSnapshotExclusiveSize() throws Exception { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); String snapshotName = snapshotEntry.getValue().getName(); - Long expected = expectedSize.getOrDefault(snapshotName, 0L); assertNotNull(expected); - System.out.println(snapshotName); assertEquals(expected, snapshotEntry.getValue().getExclusiveSize()); // Since for the test we are using RATIS/THREE assertEquals(expected * 3, snapshotEntry.getValue().getExclusiveReplicatedSize()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 014865f919f..eeb6f2c71ea 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -118,8 +118,6 @@ void setup(@TempDir Path tempDir) throws Exception { conf.setTimeDuration(OZONE_OM_LEASE_HARD_LIMIT, EXPIRE_THRESHOLD_MS, TimeUnit.MILLISECONDS); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.setQuietMode(false); OmTestManagers omTestManagers = new OmTestManagers(conf); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java index e04891da83a..3948f4fab80 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java @@ -20,8 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotManager; @@ -57,26 +56,25 @@ public class TestSnapshotDeletingService { private SnapshotChainManager chainManager; @Mock private OmMetadataManagerImpl omMetadataManager; + @Mock + private ScmBlockLocationProtocol scmClient; private final OzoneConfiguration conf = new OzoneConfiguration();; private final long sdsRunInterval = Duration.ofMillis(1000).toMillis(); private final long sdsServiceTimeout = Duration.ofSeconds(10).toMillis(); - private static Stream testCasesForIgnoreSnapshotGc() throws IOException { - SnapshotInfo flushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true) - .setLastTransactionInfo(TransactionInfo.valueOf(1, 1).toByteString()) - .setName("snap1").build(); - SnapshotInfo unFlushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1") - .setLastTransactionInfo(TransactionInfo.valueOf(0, 0).toByteString()).build(); + private static Stream testCasesForIgnoreSnapshotGc() { + SnapshotInfo filteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true).setName("snap1").build(); + SnapshotInfo unFilteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1").build(); return Stream.of( - Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); + Arguments.of(filteredSnapshot, 
SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); } @ParameterizedTest @@ -89,15 +87,9 @@ public void testProcessSnapshotLogicInSDS(SnapshotInfo snapshotInfo, Mockito.when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); Mockito.when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); - if (status == SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED) { - Table transactionInfoTable = Mockito.mock(Table.class); - Mockito.when(omMetadataManager.getTransactionInfoTable()).thenReturn(transactionInfoTable); - Mockito.when(transactionInfoTable.getSkipCache(Mockito.anyString())) - .thenReturn(TransactionInfo.valueOf(1, 1)); - } SnapshotDeletingService snapshotDeletingService = - new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager); + new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager, scmClient); snapshotInfo.setSnapshotStatus(status); assertEquals(expectedOutcome, snapshotDeletingService.shouldIgnoreSnapshot(snapshotInfo)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index f49bfc33976..c5ae809718e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; -import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -39,7 +38,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -170,7 +168,6 @@ public void testAddSnapshot() throws Exception { } assertEquals(snapshotID3, chainManager.getLatestGlobalSnapshotId()); - assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID3, chainManager.getLatestPathSnapshotId( String.join("/", "vol1", "bucket1"))); @@ -288,7 +285,6 @@ public void testChainFromLoadFromTable(boolean increasingTIme) assertFalse(chainManager.isSnapshotChainCorrupted()); // check if snapshots loaded correctly from snapshotInfoTable assertEquals(snapshotID2, chainManager.getLatestGlobalSnapshotId()); - assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID2, chainManager.nextGlobalSnapshot(snapshotID1)); assertEquals(snapshotID1, chainManager.previousPathSnapshot(String .join("/", "vol1", "bucket1"), snapshotID2)); @@ -309,34 +305,6 @@ public void 
testChainFromLoadFromTable(boolean increasingTIme) () -> chainManager.nextGlobalSnapshot(snapshotID1)); } - @ParameterizedTest - @ValueSource(ints = {0, 1, 2, 5, 10}) - public void testSnapshotChainIterator(int numberOfSnapshots) throws IOException { - Table snapshotInfo = omMetadataManager.getSnapshotInfoTable(); - List snapshotInfoList = new ArrayList<>(); - - UUID prevSnapshotID = null; - long time = System.currentTimeMillis(); - for (int i = 0; i < numberOfSnapshots; i++) { - UUID snapshotID = UUID.randomUUID(); - SnapshotInfo snapInfo = createSnapshotInfo(snapshotID, prevSnapshotID, - prevSnapshotID, time++); - snapshotInfo.put(snapshotID.toString(), snapInfo); - prevSnapshotID = snapshotID; - snapshotInfoList.add(snapInfo); - } - chainManager = new SnapshotChainManager(omMetadataManager); - assertFalse(chainManager.isSnapshotChainCorrupted()); - List reverseChain = Lists.newArrayList(chainManager.iterator(true)); - Collections.reverse(reverseChain); - List forwardChain = Lists.newArrayList(chainManager.iterator(false)); - List expectedChain = snapshotInfoList.stream().map(SnapshotInfo::getSnapshotId).collect(Collectors.toList()); - assertEquals(expectedChain, reverseChain); - assertEquals(expectedChain, forwardChain); - assertEquals(forwardChain, reverseChain); - - } - private static Stream invalidSnapshotChain() { List nodes = IntStream.range(0, 5) .mapToObj(i -> UUID.randomUUID()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 0f2ab615066..d07372c4fc6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -364,6 +364,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { omSnapshotManager = mock(OmSnapshotManager.class); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics, 0); when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index 29e0115861f..dc00433e179 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -19,18 +19,12 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; -import 
org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -41,7 +35,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -121,44 +114,4 @@ public void testSnapshotSSTFilteredFlag() throws Exception { snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); assertTrue(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).isSstFiltered()); } - - @Test - public void testLastTransactionInfo() throws Exception { - Table snapshotInfo = - omMetadataManager.getSnapshotInfoTable(); - SnapshotInfo info = createSnapshotInfo(); - snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); - assertNull(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo()); - // checking if true value is returned when snapshot is null. - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, (SnapshotInfo)null)); - omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(0, 0)); - // Checking if changes have been flushed when lastTransactionInfo is null - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, info)); - TermIndex termIndex = TermIndex.valueOf(1, 1); - info.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); - // Checking if changes to snapshot object has been updated but not updated on cache or disk. - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - snapshotInfo.addCacheEntry(new CacheKey<>(EXPECTED_SNAPSHOT_KEY), CacheValue.get(termIndex.getIndex(), info)); - - assertEquals(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo(), info.getLastTransactionInfo()); - - // Checking if changes have not been flushed when snapshot last transaction info is behind OmTransactionTable value. - assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - omMetadataManager.getTransactionInfoTable().addCacheEntry(new CacheKey<>(OzoneConsts.TRANSACTION_INFO_KEY), - CacheValue.get(termIndex.getIndex(), TransactionInfo.valueOf(1, 1))); - assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - - // Checking changes are flushed when transaction is equal. - omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, - TransactionInfo.valueOf(1, 1)); - - - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - // Checking changes are flushed when transactionIndex is greater . - omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, 2)); - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - // Checking changes are flushed when both term & transactionIndex is greater. 
- omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(2, 2)); - assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java deleted file mode 100644 index e60e23de22a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.snapshot; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.io.TempDir; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static 
org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Base class to test snapshot functionalities. - */ -public class TestSnapshotRequestAndResponse { - @TempDir - private File testDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - private OmSnapshotManager omSnapshotManager; - - private String volumeName; - private String bucketName; - private boolean isAdmin; - - public BatchOperation getBatchOperation() { - return batchOperation; - } - - public String getBucketName() { - return bucketName; - } - - public boolean isAdmin() { - return isAdmin; - } - - public OmMetadataManagerImpl getOmMetadataManager() { - return omMetadataManager; - } - - public OMMetrics getOmMetrics() { - return omMetrics; - } - - public OmSnapshotManager getOmSnapshotManager() { - return omSnapshotManager; - } - - public OzoneManager getOzoneManager() { - return ozoneManager; - } - - public File getTestDir() { - return testDir; - } - - public String getVolumeName() { - return volumeName; - } - - protected TestSnapshotRequestAndResponse() { - this.isAdmin = false; - } - - protected TestSnapshotRequestAndResponse(boolean isAdmin) { - this.isAdmin = isAdmin; - } - - @BeforeEach - public void baseSetup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(isAdmin); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - IAccessAuthorizer accessAuthorizer = mock(IAccessAuthorizer.class); - when(ozoneManager.getAccessAuthorizer()).thenReturn(accessAuthorizer); - when(accessAuthorizer.isNative()).thenReturn(false); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if 
(batchOperation != null) { - batchOperation.close(); - } - } - - protected Path createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { - OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volume, bucket, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - // Add to batch and commit to DB. - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - } - - String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); - // Check the DB is still there - assertTrue(Files.exists(snapshotDirPath)); - return snapshotDirPath; - } - - protected List>> getDeletedKeys(String volume, String bucket, - int startRange, int endRange, - int numberOfKeys, - int minVersion) { - return IntStream.range(startRange, endRange).boxed() - .map(i -> Pair.of(omMetadataManager.getOzoneDeletePathKey(i, - omMetadataManager.getOzoneKey(volume, bucket, "key" + String.format("%010d", i))), - IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, - ReplicationConfig.getDefault(ozoneManager.getConfiguration()), - new OmKeyLocationInfoGroup(minVersion + cnt, new ArrayList<>(), false)) - .setCreationTime(0).setModificationTime(0).build()) - .collect(Collectors.toList()))) - .collect(Collectors.toList()); - } - - protected List> getRenameKeys(String volume, String bucket, - int startRange, int endRange, - String renameKeyPrefix) { - return IntStream.range(startRange, endRange).boxed() - .map(i -> { - try { - return Pair.of(omMetadataManager.getRenameKey(volume, bucket, i), - omMetadataManager.getOzoneKeyFSO(volume, bucket, renameKeyPrefix + i)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }).collect(Collectors.toList()); - } - - protected List>> getDeletedDirKeys(String volume, String bucket, - int startRange, int endRange, int numberOfKeys) { - return IntStream.range(startRange, endRange).boxed() - .map(i -> { - try { - return Pair.of(omMetadataManager.getOzoneDeletePathKey(i, - omMetadataManager.getOzoneKeyFSO(volume, bucket, "1/key" + i)), - IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, - ReplicationConfig.getDefault(ozoneManager.getConfiguration())).build()) - .collect(Collectors.toList())); - } catch (IOException e) { - throw new RuntimeException(e); - } - }) - .collect(Collectors.toList()); - } - -} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index ed5574af32b..f25d9011475 100644 --- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -147,6 +147,9 @@ public void initialize(URI name, Configuration conf) throws IOException { OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); + hsyncEnabled = conf.getBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, + OZONE_FS_HSYNC_ENABLED_DEFAULT); setConf(conf); Preconditions.checkNotNull(name.getScheme(), "No scheme provided in %s", name); @@ -194,8 +197,6 @@ public void initialize(URI name, Configuration conf) throws IOException { LOG.trace("Ozone URI for ozfs initialization is {}", uri); ConfigurationSource source = getConfSource(); - this.hsyncEnabled = OzoneFSUtils.canEnableHsync(source, true); - LOG.debug("hsyncEnabled = {}", hsyncEnabled); this.adapter = createAdapter(source, bucketStr, volumeStr, omHost, omPort); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 31889ed2a58..14c297d9f47 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -260,11 +260,8 @@ private void initDefaultFsBucketLayout(OzoneConfiguration conf) } } - OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist) - throws IOException { - - return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), - createIfNotExist); + OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist)throws IOException { + return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), createIfNotExist); } /** @@ -276,8 +273,7 @@ OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist) * @throws IOException Exceptions other than OMException with result code * VOLUME_NOT_FOUND or BUCKET_NOT_FOUND. */ - private OzoneBucket getBucket(String volumeStr, String bucketStr, - boolean createIfNotExist) throws IOException { + private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean createIfNotExist) throws IOException { Preconditions.checkNotNull(volumeStr); Preconditions.checkNotNull(bucketStr); @@ -287,7 +283,7 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, "getBucket: Invalid argument: given bucket string is empty."); } - OzoneBucket bucket; + OzoneBucket bucket = null; try { bucket = proxy.getBucketDetails(volumeStr, bucketStr); @@ -299,44 +295,8 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); } catch (OMException ex) { if (createIfNotExist) { - // getBucketDetails can throw VOLUME_NOT_FOUND when the parent volume - // doesn't exist and ACL is enabled; it can only throw BUCKET_NOT_FOUND - // when ACL is disabled. Both exceptions need to be handled. - switch (ex.getResult()) { - case VOLUME_NOT_FOUND: - // Create the volume first when the volume doesn't exist - try { - objectStore.createVolume(volumeStr); - } catch (OMException newVolEx) { - // Ignore the case where another client created the volume - if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { - throw newVolEx; - } - } - // No break here. 
Proceed to create the bucket - case BUCKET_NOT_FOUND: - // When BUCKET_NOT_FOUND is thrown, we expect the parent volume - // exists, so that we don't call create volume and incur - // unnecessary ACL checks which could lead to unwanted behavior. - OzoneVolume volume = proxy.getVolumeDetails(volumeStr); - // Create the bucket - try { - // Buckets created by OFS should be in FSO layout - volume.createBucket(bucketStr, - BucketArgs.newBuilder().setBucketLayout( - this.defaultOFSBucketLayout).build()); - } catch (OMException newBucEx) { - // Ignore the case where another client created the bucket - if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { - throw newBucEx; - } - } - break; - default: - // Throw unhandled exception - throw ex; - } - // Try get bucket again + handleVolumeOrBucketCreationOnException(volumeStr, bucketStr, ex); + // Try to get the bucket again bucket = proxy.getBucketDetails(volumeStr, bucketStr); } else { throw ex; @@ -346,6 +306,41 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, return bucket; } + private void handleVolumeOrBucketCreationOnException(String volumeStr, String bucketStr, OMException ex) + throws IOException { + // OM can throw VOLUME_NOT_FOUND when the parent volume does not exist, and in this case we may create the volume, + // OM can also throw BUCKET_NOT_FOUND when the parent bucket does not exist, and so we also may create the bucket. + // This method creates the volume and the bucket when an exception marks that they don't exist. + switch (ex.getResult()) { + case VOLUME_NOT_FOUND: + // Create the volume first when the volume doesn't exist + try { + objectStore.createVolume(volumeStr); + } catch (OMException newVolEx) { + // Ignore the case where another client created the volume + if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { + throw newVolEx; + } + } + // No break here. Proceed to create the bucket + case BUCKET_NOT_FOUND: + // Create the bucket + try { + // Buckets created by OFS should be in FSO layout + BucketArgs defaultBucketArgs = BucketArgs.newBuilder().setBucketLayout(this.defaultOFSBucketLayout).build(); + proxy.createBucket(volumeStr, bucketStr, defaultBucketArgs); + } catch (OMException newBucEx) { + // Ignore the case where another client created the bucket + if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { + throw newBucEx; + } + } + break; + default: + throw ex; + } + } + /** * This API returns the value what is configured at client side only. It could * differ from the server side default values. If no replication config @@ -515,30 +510,40 @@ public boolean createDirectory(String pathStr) throws IOException { LOG.trace("creating dir for path: {}", pathStr); incrementCounter(Statistic.OBJECTS_CREATED, 1); OFSPath ofsPath = new OFSPath(pathStr, config); - if (ofsPath.getVolumeName().isEmpty()) { + + String volumeName = ofsPath.getVolumeName(); + if (volumeName.isEmpty()) { // Volume name unspecified, invalid param, return failure return false; } - if (ofsPath.getBucketName().isEmpty()) { - // Create volume only - objectStore.createVolume(ofsPath.getVolumeName()); + + String bucketName = ofsPath.getBucketName(); + if (bucketName.isEmpty()) { + // Create volume only as path only contains one element the volume. 
+ objectStore.createVolume(volumeName); return true; } + String keyStr = ofsPath.getKeyName(); try { - OzoneBucket bucket = getBucket(ofsPath, true); - // Empty keyStr here indicates only volume and bucket is - // given in pathStr, so getBucket above should handle the creation - // of volume and bucket. We won't feed empty keyStr to - // bucket.createDirectory as that would be a NPE. - if (keyStr != null && keyStr.length() > 0) { - bucket.createDirectory(keyStr); + if (keyStr == null || keyStr.isEmpty()) { + // This is the case when the given path only contains volume and bucket. + // If the bucket does not exist, then this will throw and bucket will be created + // in handleVolumeOrBucketCreationOnException later. + proxy.getBucketDetails(volumeName, bucketName); + } else { + proxy.createDirectory(volumeName, bucketName, keyStr); } } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS) { throw new FileAlreadyExistsException(e.getMessage()); } - throw e; + // Create volume and bucket if they do not exist, and retry key creation. + // This call will throw an exception if it fails, or the exception is different than it handles. + handleVolumeOrBucketCreationOnException(volumeName, bucketName, e); + if (keyStr != null && !keyStr.isEmpty()) { + proxy.createDirectory(volumeName, bucketName, keyStr); + } } return true; } @@ -714,7 +719,7 @@ private FileStatusAdapter getFileStatusForKeyOrSnapshot(OFSPath ofsPath, URI uri * * @param allUsers return trashRoots of all users if true, used by emptier * @param fs Pointer to the current OFS FileSystem - * @return {@code Collection} + * @return */ public Collection getTrashRoots(boolean allUsers, BasicRootedOzoneFileSystem fs) { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 3e0a3730627..eb346b5edc5 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -153,6 +153,9 @@ public void initialize(URI name, Configuration conf) throws IOException { OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); + hsyncEnabled = conf.getBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, + OZONE_FS_HSYNC_ENABLED_DEFAULT); setConf(conf); Preconditions.checkNotNull(name.getScheme(), "No scheme provided in %s", name); @@ -189,8 +192,6 @@ public void initialize(URI name, Configuration conf) throws IOException { LOG.trace("Ozone URI for OFS initialization is " + uri); ConfigurationSource source = getConfSource(); - this.hsyncEnabled = OzoneFSUtils.canEnableHsync(source, true); - LOG.debug("hsyncEnabled = {}", hsyncEnabled); this.adapter = createAdapter(source, omHostOrServiceId, omPort); this.adapterImpl = (BasicRootedOzoneClientAdapterImpl) this.adapter; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java index 6354ee0eebe..f92f8d95704 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java @@ -33,7 +33,7 @@ * information can be 
converted to this class, and this class can be used to * create hadoop 2.x FileStatus. *

    - * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) + * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) */ public final class FileStatusAdapter { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index f873b43ae98..4dc70bfa569 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -193,6 +193,7 @@ public int read(long position, ByteBuffer buf) throws IOException { /** * @param buf the ByteBuffer to receive the results of the read operation. * @param position offset + * @return void * @throws IOException if there is some error performing the read * @throws EOFException if end of file reached before reading fully */ diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index d91d488c434..e262895664f 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -342,29 +342,6 @@ - - org.apache.maven.plugins - maven-remote-resources-plugin - - - org.apache.ozone:ozone-dev-support:${ozone.version} - - - - - org.apache.ozone - ozone-dev-support - ${ozone.version} - - - - - - process - - - - diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 0882de3bf4f..4d62ca886cd 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -31,7 +31,6 @@ import javax.sql.DataSource; import java.sql.Connection; import java.sql.SQLException; -import java.util.Arrays; /** * Class used to create tables that are required for tracking containers. @@ -52,7 +51,7 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_BAD, + ALL_REPLICAS_UNHEALTHY, NEGATIVE_SIZE // Added new state to track containers with negative sizes } @@ -70,39 +69,11 @@ public enum UnHealthyContainerStates { public void initializeSchema() throws SQLException { Connection conn = dataSource.getConnection(); dslContext = DSL.using(conn); - - if (TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { - // Drop the existing constraint if it exists - String constraintName = UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1"; - dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) - .dropConstraint(constraintName) - .execute(); - - // Add the updated constraint with all enum states - addUpdatedConstraint(); - } else { - // Create the table if it does not exist + if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { createUnhealthyContainersTable(); } } - /** - * Add the updated constraint to the table. 
- */ - private void addUpdatedConstraint() { - // Get all enum values as a list of strings - String[] enumStates = Arrays.stream(UnHealthyContainerStates.values()) - .map(Enum::name) - .toArray(String[]::new); - - // Alter the table to add the updated constraint - dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) - .add(DSL.constraint(UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1") - .check(field(name("container_state")) - .in(enumStates))) - .execute(); - } - /** * Create the Missing Containers table. */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 1a2a705fc0f..5c9f6a5f4e1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -352,8 +352,7 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu * @param url url to call * @param isSpnego is SPNEGO enabled * @return HttpURLConnection instance of the HTTP call. - * @throws IOException While reading the response, - * @throws AuthenticationException + * @throws IOException, AuthenticationException While reading the response. */ public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory, String url, boolean isSpnego) @@ -570,6 +569,7 @@ public static boolean isInitializationComplete(ReconOMMetadataManager omMetadata * @param dateFormat * @param timeZone * @return the epoch milliseconds representation of the date. + * @throws ParseException */ public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) { String localDateFormat = dateFormat; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java index 472cdb62a66..b0a9681c5b8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java @@ -65,14 +65,13 @@ public AccessHeatMapEndpoint(HeatMapServiceImpl heatMapService) { * with volume, buckets under that volume, * then directories, subdirectories and paths * under that bucket. - *

    -   * E.g. -------->>
    +   * E.g. -------->>
        * vol1                           vol2
        * - bucket1                      - bucket2
        * - dir1/dir2/key1               - dir4/dir1/key1
        * - dir1/dir2/key2               - dir4/dir5/key2
        * - dir1/dir3/key1               - dir5/dir3/key1
    -   * 
    + * * @return {@link Response} */ @GET diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 33fc4fd96de..86ef6c022d5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -157,15 +157,15 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM, } /** - * Return {@code org.apache.hadoop.hdds.scm.container} + * Return @{@link org.apache.hadoop.hdds.scm.container} * for the containers starting from the given "prev-key" query param for the * given "limit". The given "prev-key" is skipped from the results returned. * * @param prevKey the containerID after which results are returned. - * start containerID, >=0, + * start containerID, >=0, * start searching at the head if 0. * @param limit max no. of containers to get. - * count must be >= 0 + * count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @return {@link Response} @@ -408,18 +408,13 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - - // Filtering out EMPTY_MISSING and NEGATIVE_SIZE containers from the response. - // These container states are not being inserted into the database as they represent - // edge cases that are not critical to track as unhealthy containers. - List filteredContainers = containers.stream() - .filter(container -> !container.getContainerState() - .equals(UnHealthyContainerStates.EMPTY_MISSING.toString()) - && !container.getContainerState() - .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString())) - .collect(Collectors.toList()); - - for (UnhealthyContainers c : filteredContainers) { + List emptyMissingFiltered = containers.stream() + .filter( + container -> !container.getContainerState() + .equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) + .collect( + Collectors.toList()); + for (UnhealthyContainers c : emptyMissingFiltered) { long containerID = c.getContainerId(); ContainerInfo containerInfo = containerManager.getContainer(ContainerID.valueOf(containerID)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 4620b69fbe3..3f95c04fc91 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -734,7 +734,7 @@ public Response getDeletedDirectorySummary() { * /volume1/fso-bucket/dir1/dir2/dir3/file1 * Input Request for OBS bucket: * - * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`} + * `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS` * Output Response: * * { @@ -832,7 +832,7 @@ public Response getDeletedDirectorySummary() { * } * Input Request for FSO bucket: * - * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`} + * `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS` * Output Response: * * { @@ 
-930,6 +930,7 @@ public Response getDeletedDirectorySummary() { * } * * ******************************************************** + * @throws IOException */ @GET @Path("/listKeys") diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 58d2cd31076..9cd6fa33d03 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -245,15 +245,13 @@ public Map searchOpenKeysInFSO(String startPrefix, * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names * with their corresponding IDs. It simplifies database queries for FSO bucket operations. - *
    -   * {@code
    +   *
        * Examples:
        * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
        * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
        * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
        * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
    -   * }
    -   * 
    + * * @param prevKeyPrefix The path to be converted. * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java index 3ce4fc7f837..070b7e1ccd4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java @@ -32,7 +32,6 @@ */ @Path("/triggerdbsync") @Produces(MediaType.APPLICATION_JSON) -@AdminOnly public class TriggerDBSyncEndpoint { private OzoneManagerServiceProvider ozoneManagerServiceProvider; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index a2db616ec2f..266caaa2d8e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -118,7 +118,7 @@ public static String buildSubpath(String path, String nextLevel) { } /** - * Example: {@literal /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt} . + * Example: /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt. * @param names parsed request * @return key name */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index eaf08d9ca83..ba03ec61f14 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,6 +50,12 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; + /** + * Total count of containers with negative size. + */ + @JsonProperty("negativeSizeCount") + private long negativeSizeCount = 0; + /** * A collection of unhealthy containers. 
*/ @@ -77,6 +83,9 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; + } else if (state.equals( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + this.negativeSizeCount = count; } } @@ -96,6 +105,10 @@ public long getMisReplicatedCount() { return misReplicatedCount; } + public long getNegativeSizeCount() { + return negativeSizeCount; + } + public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 11af6eaff53..639047d37bd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -29,7 +29,6 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -79,8 +78,6 @@ public class ContainerHealthTask extends ReconScmTask { private final ReconContainerMetadataManager reconContainerMetadataManager; private final PlacementPolicy placementPolicy; private final long interval; - private Map> - unhealthyContainerStateStatsMapForTesting; private final Set processedContainers = new HashSet<>(); @@ -188,12 +185,10 @@ private void checkAndProcessContainers( private void logUnhealthyContainerStats( Map> unhealthyContainerStateStatsMap) { - unhealthyContainerStateStatsMapForTesting = - new HashMap<>(unhealthyContainerStateStatsMap); // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY_MISSING in unhealthy container statistics but do not add it to the table. + // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); @@ -261,11 +256,6 @@ private void completeProcessingContainer( * completeProcessingContainer is called. This will check to see if any * additional records need to be added to the database. * - * If a container is identified as missing, empty-missing, under-replicated, - * over-replicated or mis-replicated, the method checks with SCM to determine - * if it has been deleted, using {@code containerDeletedInSCM}. If the container is - * deleted in SCM, the corresponding record is removed from Recon. 
- * * @param currentTime Timestamp to place on all records generated by this run * @param unhealthyContainerStateCountMap * @return Count of records processed @@ -283,11 +273,9 @@ private long processExistingDBRecords(long currentTime, recordCount++; UnhealthyContainersRecord rec = cursor.fetchNext(); try { - // Set the current container if it's not already set if (currentContainer == null) { currentContainer = setCurrentContainer(rec.getContainerId()); } - // If the container ID has changed, finish processing the previous one if (currentContainer.getContainerID() != rec.getContainerId()) { completeProcessingContainer( currentContainer, existingRecords, currentTime, @@ -295,29 +283,24 @@ private long processExistingDBRecords(long currentTime, existingRecords.clear(); currentContainer = setCurrentContainer(rec.getContainerId()); } - - // Unhealthy Containers such as MISSING, UNDER_REPLICATED, - // OVER_REPLICATED, MIS_REPLICATED can have their unhealthy states changed or retained. - if (!ContainerHealthRecords.retainOrUpdateRecord(currentContainer, rec)) { - rec.delete(); + if (ContainerHealthRecords + .retainOrUpdateRecord(currentContainer, rec + )) { + // Check if the missing container is deleted in SCM + if (currentContainer.isMissing() && + containerDeletedInSCM(currentContainer.getContainer())) { + rec.delete(); + } + existingRecords.add(rec.getContainerState()); + if (rec.changed()) { + rec.update(); + } + } else { LOG.info("DELETED existing unhealthy container record...for Container: {}", currentContainer.getContainerID()); - } - - // If the container is marked as MISSING and it's deleted in SCM, remove the record - if (currentContainer.isMissing() && containerDeletedInSCM(currentContainer.getContainer())) { rec.delete(); - LOG.info("DELETED existing unhealthy container record...for Container: {}", - currentContainer.getContainerID()); - } - - existingRecords.add(rec.getContainerState()); - // If the record was changed, update it - if (rec.changed()) { - rec.update(); } } catch (ContainerNotFoundException cnf) { - // If the container is not found, delete the record and reset currentContainer rec.delete(); currentContainer = null; } @@ -343,6 +326,13 @@ private void processContainer(ContainerInfo container, long currentTime, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); + // Handle negative sized containers separately + if (h.getContainer().getUsedBytes() < 0) { + handleNegativeSizedContainers(h, currentTime, + unhealthyContainerStateStatsMap); + return; + } + if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -359,18 +349,6 @@ private void processContainer(ContainerInfo container, long currentTime, } } - /** - * Ensures the container's state in Recon is updated to match its state in SCM. - * - * If SCM reports the container as DELETED, this method attempts to transition - * the container's state in Recon from CLOSED to DELETING, or from DELETING to - * DELETED, based on the current state in Recon. It logs each transition attempt - * and handles any exceptions that may occur. - * - * @param containerInfo the container whose state is being checked and potentially updated. - * @return {@code true} if the container was found to be DELETED in SCM and the - * state transition was attempted in Recon; {@code false} otherwise. 
- */ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { try { ContainerWithPipeline containerWithPipeline = @@ -380,8 +358,6 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.DELETE); - LOG.debug("Successfully changed container {} state from CLOSED to DELETING.", - containerInfo.containerID()); } if (containerInfo.getState() == HddsProtos.LifeCycleState.DELETING && containerManager.getContainerReplicas(containerInfo.containerID()) @@ -389,7 +365,6 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { ) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.CLEANUP); - LOG.info("Successfully Deleted container {} from Recon.", containerInfo.containerID()); } return true; } @@ -405,50 +380,28 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { /** * This method is used to handle containers with negative sizes. It logs an - * error message. + * error message and inserts a record into the UNHEALTHY_CONTAINERS table. * @param containerHealthStatus * @param currentTime * @param unhealthyContainerStateStatsMap */ - private static void handleNegativeSizedContainers( + private void handleNegativeSizedContainers( ContainerHealthStatus containerHealthStatus, long currentTime, Map> unhealthyContainerStateStatsMap) { - // NEGATIVE_SIZE containers are also not inserted into the database. - // This condition usually arises due to corrupted or invalid metadata, where - // the container's size is inaccurately recorded as negative. Since this does not - // represent a typical unhealthy scenario and may not have any meaningful - // impact on system health, such containers are logged for investigation but - // excluded from the UNHEALTHY_CONTAINERS table to maintain data integrity. ContainerInfo container = containerHealthStatus.getContainer(); - LOG.error("Container {} has negative size.", container.getContainerID()); - populateContainerStats(containerHealthStatus, UnHealthyContainerStates.NEGATIVE_SIZE, - unhealthyContainerStateStatsMap); - } - - /** - * This method is used to handle containers that are empty and missing. It logs - * a debug message. - * @param containerHealthStatus - * @param currentTime - * @param unhealthyContainerStateStatsMap - */ - private static void handleEmptyMissingContainers( - ContainerHealthStatus containerHealthStatus, long currentTime, - Map> - unhealthyContainerStateStatsMap) { - // EMPTY_MISSING containers are not inserted into the database. - // These containers typically represent those that were never written to - // or remain in an incomplete state. Tracking such containers as unhealthy - // would not provide valuable insights since they don't pose a risk or issue - // to the system. Instead, they are logged for awareness, but not stored in - // the UNHEALTHY_CONTAINERS table to avoid unnecessary entries. - ContainerInfo container = containerHealthStatus.getContainer(); - LOG.debug("Empty container {} is missing. It will be logged in the " + - "unhealthy container statistics, but no record will be created in the " + - "UNHEALTHY_CONTAINERS table.", container.getContainerID()); - populateContainerStats(containerHealthStatus, EMPTY_MISSING, + LOG.error( + "Container {} has negative size. 
Please visit Recon's unhealthy " + + "container endpoint for more details.", + container.getContainerID()); + UnhealthyContainers record = + ContainerHealthRecords.recordForState(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); + List records = Collections.singletonList(record); + populateContainerStats(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, unhealthyContainerStateStatsMap); + containerHealthSchemaManager.insertUnhealthyContainerRecords(records); } /** @@ -539,21 +492,22 @@ public static List generateUnhealthyRecords( populateContainerStats(container, UnHealthyContainerStates.MISSING, unhealthyContainerStateStatsMap); } else { - handleEmptyMissingContainers(container, time, + + LOG.debug("Empty container {} is missing. Kindly check the " + + "consolidated container stats per UNHEALTHY state logged as " + + "starting with **Container State Stats:**"); + + records.add( + recordForState(container, EMPTY_MISSING, + time)); + populateContainerStats(container, + EMPTY_MISSING, unhealthyContainerStateStatsMap); } // A container cannot have any other records if it is missing so return return records; } - // For Negative sized containers we only log but not insert into DB - if (container.getContainer().getUsedBytes() < 0 - && !recordForStateExists.contains( - UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { - handleNegativeSizedContainers(container, time, - unhealthyContainerStateStatsMap); - } - if (container.isUnderReplicated() && !recordForStateExists.contains( UnHealthyContainerStates.UNDER_REPLICATED.toString())) { @@ -696,23 +650,4 @@ private static void populateContainerStats( (value + container.getContainer().getUsedBytes())); } } - - /** - * Expose the logger for testing purposes. - * - * @return the logger instance - */ - @VisibleForTesting - public Logger getLogger() { - return LOG; - } - - /** - * Expose the unhealthyContainerStateStatsMap for testing purposes. 
- */ - @VisibleForTesting - public Map> getUnhealthyContainerStateStatsMap() { - return unhealthyContainerStateStatsMapForTesting; - } - } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 9ccc09d8d03..0c13376fa52 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.recon.persistence; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.UNDER_REPLICATED; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.hadoop.ozone.recon.schema.tables.UnhealthyContainersTable.UNHEALTHY_CONTAINERS; import static org.jooq.impl.DSL.count; @@ -76,7 +76,7 @@ public List getUnhealthyContainers( SelectQuery query = dslContext.selectQuery(); query.addFrom(UNHEALTHY_CONTAINERS); if (state != null) { - if (state.equals(ALL_REPLICAS_BAD)) { + if (state.equals(ALL_REPLICAS_UNHEALTHY)) { query.addConditions(UNHEALTHY_CONTAINERS.CONTAINER_STATE .eq(UNDER_REPLICATED.toString())); query.addConditions(UNHEALTHY_CONTAINERS.ACTUAL_REPLICA_COUNT.eq(0)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 14ae997073c..1fc114eabd7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -109,7 +109,7 @@ List listBucketsUnderVolume( /** * Return the OzoneConfiguration instance used by Recon. 
- * @return OzoneConfiguration + * @return */ OzoneConfiguration getOzoneConfiguration(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index c773187c4b1..a7f486ea5ac 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -155,7 +155,6 @@ public class ReconStorageContainerManagerFacade private final SCMNodeDetails reconNodeDetails; private final SCMHAManager scmhaManager; private final SequenceIdGenerator sequenceIdGen; - private final ContainerHealthTask containerHealthTask; private DBStore dbStore; private ReconNodeManager nodeManager; @@ -273,7 +272,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, scmServiceProvider, reconTaskStatusDao, reconTaskConfig); - containerHealthTask = new ContainerHealthTask( + ContainerHealthTask containerHealthTask = new ContainerHealthTask( containerManager, scmServiceProvider, reconTaskStatusDao, containerHealthSchemaManager, containerPlacementPolicy, reconTaskConfig, reconContainerMetadataManager, conf); @@ -742,12 +741,6 @@ public StorageContainerServiceProvider getScmServiceProvider() { public ContainerSizeCountTask getContainerSizeCountTask() { return containerSizeCountTask; } - - @VisibleForTesting - public ContainerHealthTask getContainerHealthTask() { - return containerHealthTask; - } - @VisibleForTesting public ContainerCountBySizeDao getContainerCountBySizeDao() { return containerCountBySizeDao; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java index 44595a43b79..59957e11624 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java @@ -70,7 +70,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, Integer count) throws IOException; /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. @@ -80,7 +80,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, void storeContainerKeyCount(Long containerID, Long count) throws IOException; /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch operation we store into * @param containerID the containerID. @@ -91,7 +91,7 @@ void batchStoreContainerKeyCounts(BatchOperation batch, Long containerID, Long count) throws IOException; /** - * Store the containerID -> ContainerReplicaWithTimestamp mapping to the + * Store the containerID -> ContainerReplicaWithTimestamp mapping to the * container DB store. * * @param containerID the containerID. @@ -159,7 +159,7 @@ Map getContainerReplicaHistory( * Get the stored key prefixes for the given containerId. * * @param containerId the given containerId. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. 
*/ Map getKeyPrefixesForContainer( long containerId) throws IOException; @@ -170,19 +170,19 @@ Map getKeyPrefixesForContainer( * * @param containerId the given containerId. * @param prevKeyPrefix the key prefix to seek to and start scanning. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getKeyPrefixesForContainer( long containerId, String prevKeyPrefix) throws IOException; /** * Get a Map of containerID, containerMetadata of Containers only for the - * given limit. If the limit is -1 or any integer < 0, then return all + * given limit. If the limit is -1 or any integer <0, then return all * the containers without any limit. * * @param limit the no. of containers to fetch. * @param prevContainer containerID after which the results are returned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException */ Map getContainers(int limit, long prevContainer) @@ -256,7 +256,7 @@ void commitBatchOperation(RDBBatchOperation rdbBatchOperation) * * @param prevKeyPrefix the key prefix to seek to and start scanning. * @param keyVersion the key version to seek - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getContainerForKeyPrefixes( String prevKeyPrefix, long keyVersion) throws IOException; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java index 42908a775a4..46b75e45fad 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java @@ -191,7 +191,7 @@ public void batchStoreContainerKeyMapping(BatchOperation batch, } /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. @@ -204,7 +204,7 @@ public void storeContainerKeyCount(Long containerID, Long count) } /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch we store into * @param containerID the containerID. @@ -219,7 +219,7 @@ public void batchStoreContainerKeyCounts(BatchOperation batch, } /** - * Store the ContainerID -> ContainerReplicaHistory (container first and last + * Store the ContainerID -> ContainerReplicaHistory (container first and last * seen time) mapping to the container DB store. * * @param containerID the containerID. @@ -417,16 +417,16 @@ public Map getKeyPrefixesForContainer( } /** - * Iterate the DB to construct a Map of containerID -> containerMetadata + * Iterate the DB to construct a Map of containerID -> containerMetadata * only for the given limit from the given start key. The start containerID * is skipped from the result. * - * Return all the containers if limit < 0. + * Return all the containers if limit < 0. * * @param limit No of containers to get. * @param prevContainer containerID after which the * list of containers are scanned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException on failure. 
*/ @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java index bf34c9f8930..fd5d8864080 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java @@ -57,7 +57,7 @@ /** * Class to iterate over the OM DB and populate the Recon container DB with - * the container -> Key reverse mapping. + * the container -> Key reverse mapping. */ public class ContainerKeyMapperTask implements ReconOmTask { @@ -81,8 +81,8 @@ public ContainerKeyMapperTask(ReconContainerMetadataManager } /** - * Read Key -> ContainerId data from OM snapshot DB and write reverse map - * (container, key) -> count to Recon Container DB. + * Read Key -> ContainerId data from OM snapshot DB and write reverse map + * (container, key) -> count to Recon Container DB. */ @Override public Pair reprocess(OMMetadataManager omMetadataManager) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java index b5a690f5eb4..3c7ce844e9c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java @@ -23,6 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; /** * OmUpdateEventValidator is a utility class for validating OMDBUpdateEvents @@ -47,6 +48,7 @@ public OmUpdateEventValidator(OMDBDefinition omdbDefinition) { * @param keyType the key type of the event. * @param action the action performed on the event. * @return true if the event is valid, false otherwise. + * @throws IOException if an I/O error occurs during the validation. */ public boolean isValidEvent(String tableName, Object actualValueType, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java index 2092d6a326c..e904334bb31 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java @@ -35,14 +35,14 @@ public interface ReconOmTask { /** * Process a set of OM events on tables that the task is listening on. * @param events Set of events to be processed by the task. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. */ Pair process(OMUpdateEventBatch events); /** * Process a on tables that the task is listening on. * @param omMetadataManager OM Metadata manager instance. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. 
*/ Pair reprocess(OMMetadataManager omMetadataManager); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java index 1a514ceb90b..d66a7279cce 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java @@ -53,7 +53,7 @@ void reInitializeTasks(ReconOMMetadataManager omMetadataManager) /** * Get set of registered tasks. - * @return Map of Task name -> Task. + * @return Map of Task name -> Task. */ Map getRegisteredTasks(); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index f1d5dc36703..8cfb23ad685 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -1480,7 +1480,7 @@ "path": "/dummyVolume/dummyBucket", "size": 200000, "sizeWithReplica": -1, - "subPathCount": 8, + "subPathCount": 5, "subPaths": [ { "path": "/dummyVolume/dummyBucket/dir1", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index c2c046f1120..d931a0ed79b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -60,11 +60,11 @@ "@testing-library/react": "^12.1.5", "@types/react": "16.8.15", "@types/react-dom": "16.8.4", - "@types/react-router-dom": "^5.3.3", + "@types/react-router-dom": "^4.3.5", "@types/react-select": "^3.0.13", "@typescript-eslint/eslint-plugin": "^5.30.0", "@typescript-eslint/parser": "^5.30.0", - "@vitejs/plugin-react-swc": "^3.5.0", + "@vitejs/plugin-react": "^4.0.0", "eslint": "^7.28.0", "eslint-config-prettier": "^8.10.0", "eslint-plugin-prettier": "^3.4.1", @@ -73,7 +73,8 @@ "msw": "1.3.3", "npm-run-all": "^4.1.5", "prettier": "^2.8.4", - "vite": "4.5.5", + "vite": "4.5.3", + "vite-plugin-svgr": "^4.2.0", "vite-tsconfig-paths": "^3.6.0", "vitest": "^1.6.0" }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 361705adc44..d1b8844ac62 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -28,7 +28,7 @@ dependencies: version: 2.5.1 echarts: specifier: ^5.5.0 - version: 5.5.1 + version: 5.5.0 filesize: specifier: ^6.4.0 version: 6.4.0 @@ -63,7 +63,7 @@ dependencies: devDependencies: '@testing-library/jest-dom': specifier: ^6.4.8 - version: 6.5.0 + version: 6.4.8 '@testing-library/react': specifier: ^12.1.5 version: 12.1.5(react-dom@16.14.0)(react@16.14.0) @@ -74,8 +74,8 @@ devDependencies: specifier: 16.8.4 version: 16.8.4 '@types/react-router-dom': - specifier: ^5.3.3 - version: 5.3.3 + specifier: ^4.3.5 + version: 4.3.5 '@types/react-select': specifier: ^3.0.13 version: 3.1.2 @@ -85,9 +85,9 @@ devDependencies: '@typescript-eslint/parser': specifier: ^5.30.0 version: 5.62.0(eslint@7.32.0)(typescript@4.9.5) - '@vitejs/plugin-react-swc': - specifier: 
^3.5.0 - version: 3.7.0(vite@4.5.5) + '@vitejs/plugin-react': + specifier: ^4.0.0 + version: 4.3.1(vite@4.5.3) eslint: specifier: ^7.28.0 version: 7.32.0 @@ -99,7 +99,7 @@ devDependencies: version: 3.4.1(eslint-config-prettier@8.10.0)(eslint@7.32.0)(prettier@2.8.8) jsdom: specifier: ^24.1.1 - version: 24.1.3 + version: 24.1.1 json-server: specifier: ^0.15.1 version: 0.15.1 @@ -113,14 +113,17 @@ devDependencies: specifier: ^2.8.4 version: 2.8.8 vite: - specifier: 4.5.5 - version: 4.5.5(less@3.13.1) + specifier: 4.5.3 + version: 4.5.3(less@3.13.1) + vite-plugin-svgr: + specifier: ^4.2.0 + version: 4.2.0(typescript@4.9.5)(vite@4.5.3) vite-tsconfig-paths: specifier: ^3.6.0 - version: 3.6.0(vite@4.5.5) + version: 3.6.0(vite@4.5.3) vitest: specifier: ^1.6.0 - version: 1.6.0(jsdom@24.1.3)(less@3.13.1) + version: 1.6.0(jsdom@24.1.1)(less@3.13.1) packages: @@ -128,6 +131,14 @@ packages: resolution: {integrity: sha512-Ff9+ksdQQB3rMncgqDK78uLznstjyfIf2Arnh22pW8kBpLs6rpKDwgnZT46hin5Hl1WzazzK64DOrhSwYpS7bQ==} dev: true + /@ampproject/remapping@2.3.0: + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + dev: true + /@ant-design/colors@5.1.1: resolution: {integrity: sha512-Txy4KpHrp3q4XZdfgOBqLl+lkQIc3tEvHXOimRN1giX1AEC7mGtyrO9p8iRGJ3FLuVMGa2gNEzQyghVymLttKQ==} dependencies: @@ -153,7 +164,7 @@ packages: dependencies: '@ant-design/colors': 6.0.0 '@ant-design/icons-svg': 4.4.2 - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 lodash: 4.17.21 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -166,7 +177,7 @@ packages: peerDependencies: react: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 json2mq: 0.2.0 lodash: 4.17.21 @@ -187,35 +198,140 @@ packages: '@babel/highlight': 7.24.7 picocolors: 1.0.1 - /@babel/generator@7.25.6: - resolution: {integrity: sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==} + /@babel/compat-data@7.24.7: + resolution: {integrity: sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/core@7.24.7: + resolution: {integrity: sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.25.6 + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.24.7 + '@babel/generator': 7.24.7 + '@babel/helper-compilation-targets': 7.24.7 + '@babel/helper-module-transforms': 7.24.7(@babel/core@7.24.7) + '@babel/helpers': 7.24.7 + '@babel/parser': 7.24.7 + '@babel/template': 7.24.7 + '@babel/traverse': 7.24.7 + '@babel/types': 7.24.7 + convert-source-map: 2.0.0 + debug: 4.3.5 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/generator@7.24.7: + resolution: {integrity: sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.24.7 '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 jsesc: 2.5.2 - dev: false + + /@babel/helper-compilation-targets@7.24.7: + resolution: {integrity: sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==} + engines: {node: '>=6.9.0'} + 
dependencies: + '@babel/compat-data': 7.24.7 + '@babel/helper-validator-option': 7.24.7 + browserslist: 4.23.1 + lru-cache: 5.1.1 + semver: 6.3.1 + dev: true + + /@babel/helper-environment-visitor@7.24.7: + resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.24.7 + + /@babel/helper-function-name@7.24.7: + resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/template': 7.24.7 + '@babel/types': 7.24.7 + + /@babel/helper-hoist-variables@7.24.7: + resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.24.7 /@babel/helper-module-imports@7.24.7: resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} engines: {node: '>=6.9.0'} dependencies: - '@babel/traverse': 7.25.6 - '@babel/types': 7.25.6 + '@babel/traverse': 7.24.7 + '@babel/types': 7.24.7 transitivePeerDependencies: - supports-color - dev: false - /@babel/helper-string-parser@7.24.8: - resolution: {integrity: sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==} + /@babel/helper-module-transforms@7.24.7(@babel/core@7.24.7): + resolution: {integrity: sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.24.7 + '@babel/helper-environment-visitor': 7.24.7 + '@babel/helper-module-imports': 7.24.7 + '@babel/helper-simple-access': 7.24.7 + '@babel/helper-split-export-declaration': 7.24.7 + '@babel/helper-validator-identifier': 7.24.7 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-plugin-utils@7.24.7: + resolution: {integrity: sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-simple-access@7.24.7: + resolution: {integrity: sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/traverse': 7.24.7 + '@babel/types': 7.24.7 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-split-export-declaration@7.24.7: + resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.24.7 + + /@babel/helper-string-parser@7.24.7: + resolution: {integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==} engines: {node: '>=6.9.0'} - dev: false /@babel/helper-validator-identifier@7.24.7: resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==} engines: {node: '>=6.9.0'} + /@babel/helper-validator-option@7.24.7: + resolution: {integrity: sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helpers@7.24.7: + resolution: {integrity: sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==} + engines: {node: 
'>=6.9.0'} + dependencies: + '@babel/template': 7.24.7 + '@babel/types': 7.24.7 + dev: true + /@babel/highlight@7.24.7: resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} engines: {node: '>=6.9.0'} @@ -225,52 +341,71 @@ packages: js-tokens: 4.0.0 picocolors: 1.0.1 - /@babel/parser@7.25.6: - resolution: {integrity: sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==} + /@babel/parser@7.24.7: + resolution: {integrity: sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.25.6 - dev: false + '@babel/types': 7.24.7 + + /@babel/plugin-transform-react-jsx-self@7.24.7(@babel/core@7.24.7): + resolution: {integrity: sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 + '@babel/helper-plugin-utils': 7.24.7 + dev: true + + /@babel/plugin-transform-react-jsx-source@7.24.7(@babel/core@7.24.7): + resolution: {integrity: sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 + '@babel/helper-plugin-utils': 7.24.7 + dev: true - /@babel/runtime@7.25.6: - resolution: {integrity: sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==} + /@babel/runtime@7.24.7: + resolution: {integrity: sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==} engines: {node: '>=6.9.0'} dependencies: regenerator-runtime: 0.14.1 - /@babel/template@7.25.0: - resolution: {integrity: sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==} + /@babel/template@7.24.7: + resolution: {integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/parser': 7.25.6 - '@babel/types': 7.25.6 - dev: false + '@babel/parser': 7.24.7 + '@babel/types': 7.24.7 - /@babel/traverse@7.25.6: - resolution: {integrity: sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==} + /@babel/traverse@7.24.7: + resolution: {integrity: sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/generator': 7.25.6 - '@babel/parser': 7.25.6 - '@babel/template': 7.25.0 - '@babel/types': 7.25.6 - debug: 4.3.6 + '@babel/generator': 7.24.7 + '@babel/helper-environment-visitor': 7.24.7 + '@babel/helper-function-name': 7.24.7 + '@babel/helper-hoist-variables': 7.24.7 + '@babel/helper-split-export-declaration': 7.24.7 + '@babel/parser': 7.24.7 + '@babel/types': 7.24.7 + debug: 4.3.5 globals: 11.12.0 transitivePeerDependencies: - supports-color - dev: false - /@babel/types@7.25.6: - resolution: {integrity: sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==} + /@babel/types@7.24.7: + resolution: {integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==} engines: {node: '>=6.9.0'} dependencies: - '@babel/helper-string-parser': 7.24.8 + 
'@babel/helper-string-parser': 7.24.7 '@babel/helper-validator-identifier': 7.24.7 to-fast-properties: 2.0.0 - dev: false /@ctrl/tinycolor@3.6.1: resolution: {integrity: sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA==} @@ -295,7 +430,7 @@ packages: peerDependencies: react: '>=16.3.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 '@emotion/cache': 10.0.29 '@emotion/css': 10.0.27 '@emotion/serialize': 0.11.16 @@ -769,8 +904,8 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@eslint-community/regexpp@4.11.0: - resolution: {integrity: sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==} + /@eslint-community/regexpp@4.10.1: + resolution: {integrity: sha512-Zm2NGpWELsQAD1xsJzGQpYfvICSsFkEpU0jxBjfdC6uNEWXcHnfs9hScFWtXVDVl+rBQJGrl4g1vcKIejpH9dA==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} dev: true @@ -779,7 +914,7 @@ packages: engines: {node: ^10.12.0 || >=12.0.0} dependencies: ajv: 6.12.6 - debug: 4.3.6 + debug: 4.3.5 espree: 7.3.1 globals: 13.24.0 ignore: 4.0.6 @@ -801,7 +936,7 @@ packages: deprecated: Use @eslint/config-array instead dependencies: '@humanwhocodes/object-schema': 1.2.1 - debug: 4.3.6 + debug: 4.3.5 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -836,7 +971,7 @@ packages: engines: {node: '>=6.0.0'} dependencies: '@jridgewell/set-array': 1.2.1 - '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/sourcemap-codec': 1.4.15 '@jridgewell/trace-mapping': 0.3.25 /@jridgewell/resolve-uri@3.1.2: @@ -847,21 +982,21 @@ packages: resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} engines: {node: '>=6.0.0'} - /@jridgewell/sourcemap-codec@1.5.0: - resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} /@jridgewell/trace-mapping@0.3.25: resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} dependencies: '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/sourcemap-codec': 1.4.15 /@mswjs/cookies@0.2.2: resolution: {integrity: sha512-mlN83YSrcFgk7Dm1Mys40DLssI1KdJji2CMKN8eOlBqsTADYzj2+jWzsANsUTFbxDMWPD5e9bfA1RGqBpS3O1g==} engines: {node: '>=14'} dependencies: '@types/set-cookie-parser': 2.4.10 - set-cookie-parser: 2.7.0 + set-cookie-parser: 2.6.0 dev: true /@mswjs/interceptors@0.17.10: @@ -871,7 +1006,7 @@ packages: '@open-draft/until': 1.0.3 '@types/debug': 4.1.12 '@xmldom/xmldom': 0.8.10 - debug: 4.3.6 + debug: 4.3.5 headers-polyfill: 3.2.5 outvariant: 1.4.3 strict-event-emitter: 0.2.8 @@ -912,128 +1047,142 @@ packages: dev: true optional: true - /@rollup/rollup-android-arm-eabi@4.21.1: - resolution: {integrity: sha512-2thheikVEuU7ZxFXubPDOtspKn1x0yqaYQwvALVtEcvFhMifPADBrgRPyHV0TF3b+9BgvgjgagVyvA/UqPZHmg==} + /@rollup/pluginutils@5.1.0: + resolution: {integrity: sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==} + engines: {node: '>=14.0.0'} + peerDependencies: + rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0 + peerDependenciesMeta: + rollup: + optional: true + dependencies: + '@types/estree': 1.0.5 + estree-walker: 2.0.2 + picomatch: 2.3.1 + dev: true + + /@rollup/rollup-android-arm-eabi@4.18.0: + resolution: {integrity: 
sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} cpu: [arm] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-android-arm64@4.21.1: - resolution: {integrity: sha512-t1lLYn4V9WgnIFHXy1d2Di/7gyzBWS8G5pQSXdZqfrdCGTwi1VasRMSS81DTYb+avDs/Zz4A6dzERki5oRYz1g==} + /@rollup/rollup-android-arm64@4.18.0: + resolution: {integrity: sha512-avCea0RAP03lTsDhEyfy+hpfr85KfyTctMADqHVhLAF3MlIkq83CP8UfAHUssgXTYd+6er6PaAhx/QGv4L1EiA==} cpu: [arm64] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-arm64@4.21.1: - resolution: {integrity: sha512-AH/wNWSEEHvs6t4iJ3RANxW5ZCK3fUnmf0gyMxWCesY1AlUj8jY7GC+rQE4wd3gwmZ9XDOpL0kcFnCjtN7FXlA==} + /@rollup/rollup-darwin-arm64@4.18.0: + resolution: {integrity: sha512-IWfdwU7KDSm07Ty0PuA/W2JYoZ4iTj3TUQjkVsO/6U+4I1jN5lcR71ZEvRh52sDOERdnNhhHU57UITXz5jC1/w==} cpu: [arm64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-x64@4.21.1: - resolution: {integrity: sha512-dO0BIz/+5ZdkLZrVgQrDdW7m2RkrLwYTh2YMFG9IpBtlC1x1NPNSXkfczhZieOlOLEqgXOFH3wYHB7PmBtf+Bg==} + /@rollup/rollup-darwin-x64@4.18.0: + resolution: {integrity: sha512-n2LMsUz7Ynu7DoQrSQkBf8iNrjOGyPLrdSg802vk6XT3FtsgX6JbE8IHRvposskFm9SNxzkLYGSq9QdpLYpRNA==} cpu: [x64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-gnueabihf@4.21.1: - resolution: {integrity: sha512-sWWgdQ1fq+XKrlda8PsMCfut8caFwZBmhYeoehJ05FdI0YZXk6ZyUjWLrIgbR/VgiGycrFKMMgp7eJ69HOF2pQ==} + /@rollup/rollup-linux-arm-gnueabihf@4.18.0: + resolution: {integrity: sha512-C/zbRYRXFjWvz9Z4haRxcTdnkPt1BtCkz+7RtBSuNmKzMzp3ZxdM28Mpccn6pt28/UWUCTXa+b0Mx1k3g6NOMA==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-musleabihf@4.21.1: - resolution: {integrity: sha512-9OIiSuj5EsYQlmwhmFRA0LRO0dRRjdCVZA3hnmZe1rEwRk11Jy3ECGGq3a7RrVEZ0/pCsYWx8jG3IvcrJ6RCew==} + /@rollup/rollup-linux-arm-musleabihf@4.18.0: + resolution: {integrity: sha512-l3m9ewPgjQSXrUMHg93vt0hYCGnrMOcUpTz6FLtbwljo2HluS4zTXFy2571YQbisTnfTKPZ01u/ukJdQTLGh9A==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-gnu@4.21.1: - resolution: {integrity: sha512-0kuAkRK4MeIUbzQYu63NrJmfoUVicajoRAL1bpwdYIYRcs57iyIV9NLcuyDyDXE2GiZCL4uhKSYAnyWpjZkWow==} + /@rollup/rollup-linux-arm64-gnu@4.18.0: + resolution: {integrity: sha512-rJ5D47d8WD7J+7STKdCUAgmQk49xuFrRi9pZkWoRD1UeSMakbcepWXPF8ycChBoAqs1pb2wzvbY6Q33WmN2ftw==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-musl@4.21.1: - resolution: {integrity: sha512-/6dYC9fZtfEY0vozpc5bx1RP4VrtEOhNQGb0HwvYNwXD1BBbwQ5cKIbUVVU7G2d5WRE90NfB922elN8ASXAJEA==} + /@rollup/rollup-linux-arm64-musl@4.18.0: + resolution: {integrity: sha512-be6Yx37b24ZwxQ+wOQXXLZqpq4jTckJhtGlWGZs68TgdKXJgw54lUUoFYrg6Zs/kjzAQwEwYbp8JxZVzZLRepQ==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-powerpc64le-gnu@4.21.1: - resolution: {integrity: sha512-ltUWy+sHeAh3YZ91NUsV4Xg3uBXAlscQe8ZOXRCVAKLsivGuJsrkawYPUEyCV3DYa9urgJugMLn8Z3Z/6CeyRQ==} + /@rollup/rollup-linux-powerpc64le-gnu@4.18.0: + resolution: {integrity: sha512-hNVMQK+qrA9Todu9+wqrXOHxFiD5YmdEi3paj6vP02Kx1hjd2LLYR2eaN7DsEshg09+9uzWi2W18MJDlG0cxJA==} cpu: [ppc64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-riscv64-gnu@4.21.1: - resolution: {integrity: sha512-BggMndzI7Tlv4/abrgLwa/dxNEMn2gC61DCLrTzw8LkpSKel4o+O+gtjbnkevZ18SKkeN3ihRGPuBxjaetWzWg==} + 
/@rollup/rollup-linux-riscv64-gnu@4.18.0: + resolution: {integrity: sha512-ROCM7i+m1NfdrsmvwSzoxp9HFtmKGHEqu5NNDiZWQtXLA8S5HBCkVvKAxJ8U+CVctHwV2Gb5VUaK7UAkzhDjlg==} cpu: [riscv64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-s390x-gnu@4.21.1: - resolution: {integrity: sha512-z/9rtlGd/OMv+gb1mNSjElasMf9yXusAxnRDrBaYB+eS1shFm6/4/xDH1SAISO5729fFKUkJ88TkGPRUh8WSAA==} + /@rollup/rollup-linux-s390x-gnu@4.18.0: + resolution: {integrity: sha512-0UyyRHyDN42QL+NbqevXIIUnKA47A+45WyasO+y2bGJ1mhQrfrtXUpTxCOrfxCR4esV3/RLYyucGVPiUsO8xjg==} cpu: [s390x] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-gnu@4.21.1: - resolution: {integrity: sha512-kXQVcWqDcDKw0S2E0TmhlTLlUgAmMVqPrJZR+KpH/1ZaZhLSl23GZpQVmawBQGVhyP5WXIsIQ/zqbDBBYmxm5w==} + /@rollup/rollup-linux-x64-gnu@4.18.0: + resolution: {integrity: sha512-xuglR2rBVHA5UsI8h8UbX4VJ470PtGCf5Vpswh7p2ukaqBGFTnsfzxUBetoWBWymHMxbIG0Cmx7Y9qDZzr648w==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-musl@4.21.1: - resolution: {integrity: sha512-CbFv/WMQsSdl+bpX6rVbzR4kAjSSBuDgCqb1l4J68UYsQNalz5wOqLGYj4ZI0thGpyX5kc+LLZ9CL+kpqDovZA==} + /@rollup/rollup-linux-x64-musl@4.18.0: + resolution: {integrity: sha512-LKaqQL9osY/ir2geuLVvRRs+utWUNilzdE90TpyoX0eNqPzWjRm14oMEE+YLve4k/NAqCdPkGYDaDF5Sw+xBfg==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-arm64-msvc@4.21.1: - resolution: {integrity: sha512-3Q3brDgA86gHXWHklrwdREKIrIbxC0ZgU8lwpj0eEKGBQH+31uPqr0P2v11pn0tSIxHvcdOWxa4j+YvLNx1i6g==} + /@rollup/rollup-win32-arm64-msvc@4.18.0: + resolution: {integrity: sha512-7J6TkZQFGo9qBKH0pk2cEVSRhJbL6MtfWxth7Y5YmZs57Pi+4x6c2dStAUvaQkHQLnEQv1jzBUW43GvZW8OFqA==} cpu: [arm64] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-ia32-msvc@4.21.1: - resolution: {integrity: sha512-tNg+jJcKR3Uwe4L0/wY3Ro0H+u3nrb04+tcq1GSYzBEmKLeOQF2emk1whxlzNqb6MMrQ2JOcQEpuuiPLyRcSIw==} + /@rollup/rollup-win32-ia32-msvc@4.18.0: + resolution: {integrity: sha512-Txjh+IxBPbkUB9+SXZMpv+b/vnTEtFyfWZgJ6iyCmt2tdx0OF5WhFowLmnh8ENGNpfUlUZkdI//4IEmhwPieNg==} cpu: [ia32] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-x64-msvc@4.21.1: - resolution: {integrity: sha512-xGiIH95H1zU7naUyTKEyOA/I0aexNMUdO9qRv0bLKN3qu25bBdrxZHqA3PTJ24YNN/GdMzG4xkDcd/GvjuhfLg==} + /@rollup/rollup-win32-x64-msvc@4.18.0: + resolution: {integrity: sha512-UOo5FdvOL0+eIVTgS4tIdbW+TtnBLWg1YBCcU2KWM7nuNwRz9bksDX1bekJJCpu25N1DVWaCwnT39dVQxzqS8g==} cpu: [x64] os: [win32] requiresBuild: true @@ -1049,129 +1198,130 @@ packages: engines: {node: '>=6'} dev: true - /@swc/core-darwin-arm64@1.7.21: - resolution: {integrity: sha512-hh5uOZ7jWF66z2TRMhhXtWMQkssuPCSIZPy9VHf5KvZ46cX+5UeECDthchYklEVZQyy4Qr6oxfh4qff/5spoMA==} - engines: {node: '>=10'} - cpu: [arm64] - os: [darwin] - requiresBuild: true - dev: true - optional: true - - /@swc/core-darwin-x64@1.7.21: - resolution: {integrity: sha512-lTsPquqSierQ6jWiWM7NnYXXZGk9zx3NGkPLHjPbcH5BmyiauX0CC/YJYJx7YmS2InRLyALlGmidHkaF4JY28A==} - engines: {node: '>=10'} - cpu: [x64] - os: [darwin] - requiresBuild: true + /@svgr/babel-plugin-add-jsx-attribute@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-linux-arm-gnueabihf@1.7.21: - resolution: 
{integrity: sha512-AgSd0fnSzAqCvWpzzZCq75z62JVGUkkXEOpfdi99jj/tryPy38KdXJtkVWJmufPXlRHokGTBitalk33WDJwsbA==} - engines: {node: '>=10'} - cpu: [arm] - os: [linux] - requiresBuild: true + /@svgr/babel-plugin-remove-jsx-attribute@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-linux-arm64-gnu@1.7.21: - resolution: {integrity: sha512-l+jw6RQ4Y43/8dIst0c73uQE+W3kCWrCFqMqC/xIuE/iqHOnvYK6YbA1ffOct2dImkHzNiKuoehGqtQAc6cNaQ==} - engines: {node: '>=10'} - cpu: [arm64] - os: [linux] - requiresBuild: true + /@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-linux-arm64-musl@1.7.21: - resolution: {integrity: sha512-29KKZXrTo/c9F1JFL9WsNvCa6UCdIVhHP5EfuYhlKbn5/YmSsNFkuHdUtZFEd5U4+jiShXDmgGCtLW2d08LIwg==} - engines: {node: '>=10'} - cpu: [arm64] - os: [linux] - requiresBuild: true + /@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-linux-x64-gnu@1.7.21: - resolution: {integrity: sha512-HsP3JwddvQj5HvnjmOr+Bd5plEm6ccpfP5wUlm3hywzvdVkj+yR29bmD7UwpV/1zCQ60Ry35a7mXhKI6HQxFgw==} - engines: {node: '>=10'} - cpu: [x64] - os: [linux] - requiresBuild: true + /@svgr/babel-plugin-svg-dynamic-title@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-linux-x64-musl@1.7.21: - resolution: {integrity: sha512-hYKLVeUTHqvFK628DFJEwxoX6p42T3HaQ4QjNtf3oKhiJWFh9iTRUrN/oCB5YI3R9WMkFkKh+99gZ/Dd0T5lsg==} - engines: {node: '>=10'} - cpu: [x64] - os: [linux] - requiresBuild: true + /@svgr/babel-plugin-svg-em-dimensions@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-win32-arm64-msvc@1.7.21: - resolution: {integrity: sha512-qyWAKW10aMBe6iUqeZ7NAJIswjfggVTUpDINpQGUJhz+pR71YZDidXgZXpaDB84YyDB2JAlRqd1YrLkl7CMiIw==} - engines: {node: '>=10'} - cpu: [arm64] - os: [win32] - requiresBuild: true + /@svgr/babel-plugin-transform-react-native-svg@8.1.0(@babel/core@7.24.7): + resolution: {integrity: sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-win32-ia32-msvc@1.7.21: - resolution: {integrity: sha512-cy61wS3wgH5mEwBiQ5w6/FnQrchBDAdPsSh0dKSzNmI+4K8hDxS8uzdBycWqJXO0cc+mA77SIlwZC3hP3Kum2g==} - engines: {node: 
'>=10'} - cpu: [ia32] - os: [win32] - requiresBuild: true + /@svgr/babel-plugin-transform-svg-component@8.0.0(@babel/core@7.24.7): + resolution: {integrity: sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==} + engines: {node: '>=12'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 dev: true - optional: true - /@swc/core-win32-x64-msvc@1.7.21: - resolution: {integrity: sha512-/rexGItJURNJOdae+a48M+loT74nsEU+PyRRVAkZMKNRtLoYFAr0cpDlS5FodIgGunp/nqM0bst4H2w6Y05IKA==} - engines: {node: '>=10'} - cpu: [x64] - os: [win32] - requiresBuild: true + /@svgr/babel-preset@8.1.0(@babel/core@7.24.7): + resolution: {integrity: sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.24.7 + '@svgr/babel-plugin-add-jsx-attribute': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-remove-jsx-attribute': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-remove-jsx-empty-expression': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-replace-jsx-attribute-value': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-svg-dynamic-title': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-svg-em-dimensions': 8.0.0(@babel/core@7.24.7) + '@svgr/babel-plugin-transform-react-native-svg': 8.1.0(@babel/core@7.24.7) + '@svgr/babel-plugin-transform-svg-component': 8.0.0(@babel/core@7.24.7) dev: true - optional: true - /@swc/core@1.7.21: - resolution: {integrity: sha512-7/cN0SZ+y2V6e0hsDD8koGR0QVh7Jl3r756bwaHLLSN+kReoUb/yVcLsA8iTn90JLME3DkQK4CPjxDCQiyMXNg==} - engines: {node: '>=10'} - requiresBuild: true - peerDependencies: - '@swc/helpers': '*' - peerDependenciesMeta: - '@swc/helpers': - optional: true + /@svgr/core@8.1.0(typescript@4.9.5): + resolution: {integrity: sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==} + engines: {node: '>=14'} dependencies: - '@swc/counter': 0.1.3 - '@swc/types': 0.1.12 - optionalDependencies: - '@swc/core-darwin-arm64': 1.7.21 - '@swc/core-darwin-x64': 1.7.21 - '@swc/core-linux-arm-gnueabihf': 1.7.21 - '@swc/core-linux-arm64-gnu': 1.7.21 - '@swc/core-linux-arm64-musl': 1.7.21 - '@swc/core-linux-x64-gnu': 1.7.21 - '@swc/core-linux-x64-musl': 1.7.21 - '@swc/core-win32-arm64-msvc': 1.7.21 - '@swc/core-win32-ia32-msvc': 1.7.21 - '@swc/core-win32-x64-msvc': 1.7.21 + '@babel/core': 7.24.7 + '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) + camelcase: 6.3.0 + cosmiconfig: 8.3.6(typescript@4.9.5) + snake-case: 3.0.4 + transitivePeerDependencies: + - supports-color + - typescript dev: true - /@swc/counter@0.1.3: - resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + /@svgr/hast-util-to-babel-ast@8.0.0: + resolution: {integrity: sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==} + engines: {node: '>=14'} + dependencies: + '@babel/types': 7.24.7 + entities: 4.5.0 dev: true - /@swc/types@0.1.12: - resolution: {integrity: sha512-wBJA+SdtkbFhHjTMYH+dEH1y4VpfGdAc2Kw/LK09i9bXd/K6j6PkDcFCEzb6iVfZMkPRrl/q0e3toqTAJdkIVA==} + /@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0): + resolution: {integrity: sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==} + engines: {node: '>=14'} + peerDependencies: + '@svgr/core': '*' dependencies: - '@swc/counter': 0.1.3 + '@babel/core': 7.24.7 + '@svgr/babel-preset': 
8.1.0(@babel/core@7.24.7) + '@svgr/core': 8.1.0(typescript@4.9.5) + '@svgr/hast-util-to-babel-ast': 8.0.0 + svg-parser: 2.0.4 + transitivePeerDependencies: + - supports-color dev: true /@szmarczak/http-timer@1.1.2: @@ -1186,7 +1336,7 @@ packages: engines: {node: '>=12'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 '@types/aria-query': 5.0.4 aria-query: 5.1.3 chalk: 4.1.2 @@ -1195,11 +1345,12 @@ packages: pretty-format: 27.5.1 dev: true - /@testing-library/jest-dom@6.5.0: - resolution: {integrity: sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==} + /@testing-library/jest-dom@6.4.8: + resolution: {integrity: sha512-JD0G+Zc38f5MBHA4NgxQMR5XtO5Jx9g86jqturNTt2WUfRmLDIY7iKkWHDCCTiDuFMre6nxAD5wHw9W5kI4rGw==} engines: {node: '>=14', npm: '>=6', yarn: '>=1'} dependencies: '@adobe/css-tools': 4.4.0 + '@babel/runtime': 7.24.7 aria-query: 5.3.0 chalk: 3.0.0 css.escape: 1.5.1 @@ -1215,7 +1366,7 @@ packages: react: <18.0.0 react-dom: <18.0.0 dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 '@testing-library/dom': 8.20.1 '@types/react-dom': 16.8.4 react: 16.14.0 @@ -1226,6 +1377,35 @@ packages: resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} dev: true + /@types/babel__core@7.20.5: + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + dependencies: + '@babel/parser': 7.24.7 + '@babel/types': 7.24.7 + '@types/babel__generator': 7.6.8 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.6 + dev: true + + /@types/babel__generator@7.6.8: + resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} + dependencies: + '@babel/types': 7.24.7 + dev: true + + /@types/babel__template@7.4.4: + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + dependencies: + '@babel/parser': 7.24.7 + '@babel/types': 7.24.7 + dev: true + + /@types/babel__traverse@7.20.6: + resolution: {integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} + dependencies: + '@babel/types': 7.24.7 + dev: true + /@types/cookie@0.4.1: resolution: {integrity: sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==} dev: true @@ -1244,6 +1424,13 @@ packages: resolution: {integrity: sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==} dev: true + /@types/history@5.0.0: + resolution: {integrity: sha512-hy8b7Y1J8OGe6LbAjj3xniQrj3v6lsivCcrmf4TzSgPzLkhIeKgc5IZnT7ReIqmEuodjfO8EYAuoFvIrHi/+jQ==} + deprecated: This is a stub types definition. history provides its own type definitions, so you do not need this installed. 
+ dependencies: + history: 5.3.0 + dev: true + /@types/js-levenshtein@1.1.3: resolution: {integrity: sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ==} dev: true @@ -1255,17 +1442,17 @@ packages: /@types/keyv@3.1.4: resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} dependencies: - '@types/node': 22.5.1 + '@types/node': 20.14.8 dev: true /@types/ms@0.7.34: resolution: {integrity: sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==} dev: true - /@types/node@22.5.1: - resolution: {integrity: sha512-KkHsxej0j9IW1KKOOAA/XBA0z08UFSrRQHErzEfA3Vgq57eXIMYboIlHJuYIfd+lwCQjtKqUu3UnmKbtUc9yRw==} + /@types/node@20.14.8: + resolution: {integrity: sha512-DO+2/jZinXfROG7j7WKFn/3C6nFwxy2lLpgLjEXJz+0XKphZlTLJ14mo8Vfg8X5BWN6XjyESXq+LcYdT7tR3bA==} dependencies: - undici-types: 6.19.8 + undici-types: 5.26.5 dev: true /@types/parse-json@4.0.2: @@ -1282,10 +1469,10 @@ packages: '@types/react': 16.8.15 dev: true - /@types/react-router-dom@5.3.3: - resolution: {integrity: sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==} + /@types/react-router-dom@4.3.5: + resolution: {integrity: sha512-eFajSUASYbPHg2BDM1G8Btx+YqGgvROPIg6sBhl3O4kbDdYXdFdfrgQFf/pcBuQVObjfT9AL/dd15jilR5DIEA==} dependencies: - '@types/history': 4.7.11 + '@types/history': 5.0.0 '@types/react': 16.8.15 '@types/react-router': 5.1.20 dev: true @@ -1302,11 +1489,11 @@ packages: dependencies: '@types/react': 16.8.15 '@types/react-dom': 16.8.4 - '@types/react-transition-group': 4.4.11 + '@types/react-transition-group': 4.4.10 dev: true - /@types/react-transition-group@4.4.11: - resolution: {integrity: sha512-RM05tAniPZ5DZPzzNFP+DmrcOdD0efDUxMy3145oljWSl3x9ZV5vhme98gTxFrj2lhXvmGNnUiuDyJgY9IKkNA==} + /@types/react-transition-group@4.4.10: + resolution: {integrity: sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==} dependencies: '@types/react': 16.8.15 dev: true @@ -1321,7 +1508,7 @@ packages: /@types/responselike@1.0.3: resolution: {integrity: sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==} dependencies: - '@types/node': 22.5.1 + '@types/node': 20.14.8 dev: true /@types/semver@7.5.8: @@ -1331,7 +1518,7 @@ packages: /@types/set-cookie-parser@2.4.10: resolution: {integrity: sha512-GGmQVGpQWUe5qglJozEjZV/5dyxbOOZ0LHe/lqyWssB88Y4svNfst0uqBVscdDeIKl5Jy5+aPSvy7mI9tYRguw==} dependencies: - '@types/node': 22.5.1 + '@types/node': 20.14.8 dev: true /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@7.32.0)(typescript@4.9.5): @@ -1345,17 +1532,17 @@ packages: typescript: optional: true dependencies: - '@eslint-community/regexpp': 4.11.0 + '@eslint-community/regexpp': 4.10.1 '@typescript-eslint/parser': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/type-utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.6 + debug: 4.3.5 eslint: 7.32.0 graphemer: 1.4.0 - ignore: 5.3.2 + ignore: 5.3.1 natural-compare-lite: 1.4.0 - semver: 7.6.3 + semver: 7.6.2 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1375,7 +1562,7 @@ packages: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) - debug: 4.3.6 + 
debug: 4.3.5 eslint: 7.32.0 typescript: 4.9.5 transitivePeerDependencies: @@ -1402,7 +1589,7 @@ packages: dependencies: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.6 + debug: 4.3.5 eslint: 7.32.0 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 @@ -1426,10 +1613,10 @@ packages: dependencies: '@typescript-eslint/types': 5.62.0 '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.6 + debug: 4.3.5 globby: 11.1.0 is-glob: 4.0.3 - semver: 7.6.3 + semver: 7.6.2 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1450,7 +1637,7 @@ packages: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) eslint: 7.32.0 eslint-scope: 5.1.1 - semver: 7.6.3 + semver: 7.6.2 transitivePeerDependencies: - supports-color - typescript @@ -1464,15 +1651,20 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@vitejs/plugin-react-swc@3.7.0(vite@4.5.5): - resolution: {integrity: sha512-yrknSb3Dci6svCd/qhHqhFPDSw0QtjumcqdKMoNNzmOl5lMXTTiqzjWtG4Qask2HdvvzaNgSunbQGet8/GrKdA==} + /@vitejs/plugin-react@4.3.1(vite@4.5.3): + resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==} + engines: {node: ^14.18.0 || >=16.0.0} peerDependencies: - vite: ^4 || ^5 - dependencies: - '@swc/core': 1.7.21 - vite: 4.5.5(less@3.13.1) + vite: ^4.2.0 || ^5.0.0 + dependencies: + '@babel/core': 7.24.7 + '@babel/plugin-transform-react-jsx-self': 7.24.7(@babel/core@7.24.7) + '@babel/plugin-transform-react-jsx-source': 7.24.7(@babel/core@7.24.7) + '@types/babel__core': 7.20.5 + react-refresh: 0.14.2 + vite: 4.5.3(less@3.13.1) transitivePeerDependencies: - - '@swc/helpers' + - supports-color dev: true /@vitest/expect@1.6.0: @@ -1480,7 +1672,7 @@ packages: dependencies: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 - chai: 4.5.0 + chai: 4.4.1 dev: true /@vitest/runner@1.6.0: @@ -1494,7 +1686,7 @@ packages: /@vitest/snapshot@1.6.0: resolution: {integrity: sha512-+Hx43f8Chus+DCmygqqfetcAZrDJwvTj0ymqjQq4CvmpKFSTVteEOBzCusu1x2tt4OJcvBflyHUE0DZSLgEMtQ==} dependencies: - magic-string: 0.30.11 + magic-string: 0.30.10 pathe: 1.1.2 pretty-format: 29.7.0 dev: true @@ -1545,7 +1737,7 @@ packages: resolution: {integrity: sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==} engines: {node: '>=0.4.0'} dependencies: - acorn: 8.12.1 + acorn: 8.12.0 dev: true /acorn@7.4.1: @@ -1554,8 +1746,8 @@ packages: hasBin: true dev: true - /acorn@8.12.1: - resolution: {integrity: sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==} + /acorn@8.12.0: + resolution: {integrity: sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==} engines: {node: '>=0.4.0'} hasBin: true dev: true @@ -1581,7 +1773,7 @@ packages: resolution: {integrity: sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==} engines: {node: '>= 14'} dependencies: - debug: 4.3.6 + debug: 4.3.5 transitivePeerDependencies: - supports-color dev: true @@ -1595,13 +1787,13 @@ packages: uri-js: 4.4.1 dev: true - /ajv@8.17.1: - resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} + /ajv@8.16.0: + resolution: {integrity: sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==} dependencies: fast-deep-equal: 3.1.3 - fast-uri: 3.0.1 
json-schema-traverse: 1.0.0 require-from-string: 2.0.2 + uri-js: 4.4.1 dev: true /ansi-align@3.0.1: @@ -1674,7 +1866,7 @@ packages: '@ant-design/colors': 5.1.1 '@ant-design/icons': 4.8.3(react-dom@16.14.0)(react@16.14.0) '@ant-design/react-slick': 0.28.4(react@16.14.0) - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 array-tree-filter: 2.1.0 classnames: 2.5.1 copy-to-clipboard: 3.3.3 @@ -1734,6 +1926,10 @@ packages: sprintf-js: 1.0.3 dev: true + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: true + /aria-query@5.1.3: resolution: {integrity: sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==} dependencies: @@ -1819,8 +2015,8 @@ packages: resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} dev: true - /aws4@1.13.2: - resolution: {integrity: sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==} + /aws4@1.13.0: + resolution: {integrity: sha512-3AungXC4I8kKsS9PuS4JH2nc+0bVY/mjgrephHTIi8fpEeGsTHBUJeosp0Wc1myYMElmD0B3Oc4XL/HVJ4PV2g==} dev: true /axios@0.28.1: @@ -1853,7 +2049,7 @@ packages: /babel-plugin-macros@2.8.0: resolution: {integrity: sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg==} dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 cosmiconfig: 6.0.0 resolve: 1.22.8 dev: false @@ -1916,26 +2112,6 @@ packages: - supports-color dev: true - /body-parser@1.20.3: - resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.13.0 - raw-body: 2.5.2 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - dev: true - /boxen@3.2.0: resolution: {integrity: sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==} engines: {node: '>=6'} @@ -1970,6 +2146,17 @@ packages: fill-range: 7.1.1 dev: true + /browserslist@4.23.1: + resolution: {integrity: sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001636 + electron-to-chromium: 1.4.810 + node-releases: 2.0.14 + update-browserslist-db: 1.0.16(browserslist@4.23.1) + dev: true + /buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} dependencies: @@ -2025,12 +2212,21 @@ packages: engines: {node: '>=6'} dev: true + /camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + dev: true + + /caniuse-lite@1.0.30001636: + resolution: {integrity: sha512-bMg2vmr8XBsbL6Lr0UHXy/21m84FTxDLWn2FSqMd5PrlbMxwJlQnC2YWYxVgp66PZE+BBNF2jYQUBKCo1FDeZg==} + dev: true + /caseless@0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} dev: true - /chai@4.5.0: - resolution: {integrity: sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==} + 
/chai@4.4.1: + resolution: {integrity: sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} engines: {node: '>=4'} dependencies: assertion-error: 1.1.0 @@ -2039,7 +2235,7 @@ packages: get-func-name: 2.0.2 loupe: 2.3.7 pathval: 1.1.1 - type-detect: 4.1.0 + type-detect: 4.0.8 dev: true /chalk@2.4.2: @@ -2183,7 +2379,7 @@ packages: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} dependencies: - mime-db: 1.53.0 + mime-db: 1.52.0 dev: true /compression@1.7.4: @@ -2245,6 +2441,10 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false + /convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + dev: true + /cookie-signature@1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} dev: true @@ -2293,6 +2493,22 @@ packages: yaml: 1.10.2 dev: false + /cosmiconfig@8.3.6(typescript@4.9.5): + resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} + engines: {node: '>=14'} + peerDependencies: + typescript: '>=4.9.5' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + path-type: 4.0.0 + typescript: 4.9.5 + dev: true + /cross-spawn@5.1.0: resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} dependencies: @@ -2390,11 +2606,11 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 dev: false - /dayjs@1.11.13: - resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} + /dayjs@1.11.11: + resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==} dev: false /debug@2.6.9: @@ -2419,8 +2635,8 @@ packages: ms: 2.0.0 dev: true - /debug@4.3.6: - resolution: {integrity: sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==} + /debug@4.3.5: + resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -2450,7 +2666,7 @@ packages: resolution: {integrity: sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==} engines: {node: '>=6'} dependencies: - type-detect: 4.1.0 + type-detect: 4.0.8 dev: true /deep-equal@2.2.3: @@ -2567,10 +2783,17 @@ packages: /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 csstype: 3.1.3 dev: false + /dot-case@3.0.4: + resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} + dependencies: + no-case: 3.0.4 + tslib: 2.6.3 + dev: true + /dot-prop@4.2.1: resolution: {integrity: sha512-l0p4+mIuJIua0mhxGoh4a+iNL9bmeK5DvnSVQa6T0OhrVmaEa1XScX5Etc673FePCJOArq/4Pa2cLGODUWTPOQ==} 
engines: {node: '>=4'} @@ -2593,17 +2816,21 @@ packages: safer-buffer: 2.1.2 dev: true - /echarts@5.5.1: - resolution: {integrity: sha512-Fce8upazaAXUVUVsjgV6mBnGuqgO+JNDlcgF79Dksy4+wgGpQB2lmYoO4TSweFg/mZITdpGHomw/cNBJZj1icA==} + /echarts@5.5.0: + resolution: {integrity: sha512-rNYnNCzqDAPCr4m/fqyUFv7fD9qIsd50S6GDFgO1DxZhncCsNsG7IfUlAlvZe5oSEQxtsjnHiUuppzccry93Xw==} dependencies: tslib: 2.3.0 - zrender: 5.6.0 + zrender: 5.5.0 dev: false /ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} dev: true + /electron-to-chromium@1.4.810: + resolution: {integrity: sha512-Kaxhu4T7SJGpRQx99tq216gCq2nMxJo+uuT6uzz9l8TVN2stL7M06MIIXAtr9jsrLs2Glflgf2vMQRepxawOdQ==} + dev: true + /emoji-regex@7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} dev: true @@ -2621,11 +2848,6 @@ packages: engines: {node: '>= 0.8'} dev: true - /encodeurl@2.0.0: - resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} - engines: {node: '>= 0.8'} - dev: true - /end-of-stream@1.4.4: resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} dependencies: @@ -2915,7 +3137,7 @@ packages: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.6 + debug: 4.3.5 doctrine: 3.0.0 enquirer: 2.4.1 escape-string-regexp: 4.0.0 @@ -2923,7 +3145,7 @@ packages: eslint-utils: 2.1.0 eslint-visitor-keys: 2.1.0 espree: 7.3.1 - esquery: 1.6.0 + esquery: 1.5.0 esutils: 2.0.3 fast-deep-equal: 3.1.3 file-entry-cache: 6.0.1 @@ -2943,7 +3165,7 @@ packages: optionator: 0.9.4 progress: 2.0.3 regexpp: 3.2.0 - semver: 7.6.3 + semver: 7.6.2 strip-ansi: 6.0.1 strip-json-comments: 3.1.1 table: 6.8.2 @@ -2968,8 +3190,8 @@ packages: hasBin: true dev: true - /esquery@1.6.0: - resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} engines: {node: '>=0.10'} dependencies: estraverse: 5.3.0 @@ -2992,6 +3214,10 @@ packages: engines: {node: '>=4.0'} dev: true + /estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + dev: true + /estree-walker@3.0.3: resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} dependencies: @@ -3044,42 +3270,42 @@ packages: /express-urlrewrite@1.4.0: resolution: {integrity: sha512-PI5h8JuzoweS26vFizwQl6UTF25CAHSggNv0J25Dn/IKZscJHWZzPrI5z2Y2jgOzIaw2qh8l6+/jUcig23Z2SA==} dependencies: - debug: 4.3.6 + debug: 4.3.5 path-to-regexp: 1.8.0 transitivePeerDependencies: - supports-color dev: true - /express@4.21.0: - resolution: {integrity: sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==} + /express@4.19.2: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.3 + body-parser: 1.20.2 content-disposition: 0.5.4 content-type: 1.0.5 cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 - encodeurl: 2.0.0 + encodeurl: 1.0.2 escape-html: 1.0.3 etag: 1.8.1 - finalhandler: 1.3.1 + finalhandler: 
1.2.0 fresh: 0.5.2 http-errors: 2.0.0 - merge-descriptors: 1.0.3 + merge-descriptors: 1.0.1 methods: 1.1.2 on-finished: 2.4.1 parseurl: 1.3.3 - path-to-regexp: 0.1.10 + path-to-regexp: 0.1.7 proxy-addr: 2.0.7 - qs: 6.13.0 + qs: 6.11.0 range-parser: 1.2.1 safe-buffer: 5.2.1 - send: 0.19.0 - serve-static: 1.16.2 + send: 0.18.0 + serve-static: 1.15.0 setprototypeof: 1.2.0 statuses: 2.0.1 type-is: 1.6.18 @@ -3123,7 +3349,7 @@ packages: '@nodelib/fs.walk': 1.2.8 glob-parent: 5.1.2 merge2: 1.4.1 - micromatch: 4.0.8 + micromatch: 4.0.7 dev: true /fast-json-stable-stringify@2.1.0: @@ -3134,10 +3360,6 @@ packages: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} dev: true - /fast-uri@3.0.1: - resolution: {integrity: sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw==} - dev: true - /fastq@1.17.1: resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} dependencies: @@ -3170,12 +3392,12 @@ packages: to-regex-range: 5.0.1 dev: true - /finalhandler@1.3.1: - resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} + /finalhandler@1.2.0: + resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} engines: {node: '>= 0.8'} dependencies: debug: 2.6.9 - encodeurl: 2.0.0 + encodeurl: 1.0.2 escape-html: 1.0.3 on-finished: 2.4.1 parseurl: 1.3.3 @@ -3225,8 +3447,8 @@ packages: is-callable: 1.2.7 dev: true - /foreground-child@3.3.0: - resolution: {integrity: sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==} + /foreground-child@3.2.1: + resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} engines: {node: '>=14'} dependencies: cross-spawn: 7.0.3 @@ -3297,6 +3519,11 @@ packages: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} dev: true + /gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + dev: true + /get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -3367,13 +3594,14 @@ packages: resolution: {integrity: sha512-m5blUd3/OqDTWwzBBtWBPrGlAzatRywHameHeekAZyZrskYouOGdNB8T/q6JucucvJXtOuyHIn0/Yia7iDasDw==} dev: true - /glob@10.4.5: - resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} + /glob@10.4.2: + resolution: {integrity: sha512-GwMlUF6PkPo3Gk21UxkCohOv0PLcIXVtKyLlpEI28R/cO/4eNOdmLk3CMW1wROV/WR/EsZOWAfBbBOqYvs88/w==} + engines: {node: '>=16 || 14 >=14.18'} hasBin: true dependencies: - foreground-child: 3.3.0 - jackspeak: 3.4.3 - minimatch: 9.0.5 + foreground-child: 3.2.1 + jackspeak: 3.4.0 + minimatch: 9.0.4 minipass: 7.1.2 package-json-from-dist: 1.0.0 path-scurry: 1.11.1 @@ -3401,7 +3629,6 @@ packages: /globals@11.12.0: resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} engines: {node: '>=4'} - dev: false /globals@13.24.0: resolution: {integrity: 
sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -3425,7 +3652,7 @@ packages: array-union: 2.1.0 dir-glob: 3.0.1 fast-glob: 3.3.2 - ignore: 5.3.2 + ignore: 5.3.1 merge2: 1.4.1 slash: 3.0.0 dev: true @@ -3539,7 +3766,7 @@ packages: /history@4.10.1: resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==} dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 loose-envify: 1.4.0 resolve-pathname: 3.0.0 tiny-invariant: 1.3.3 @@ -3547,6 +3774,12 @@ packages: value-equal: 1.0.1 dev: false + /history@5.3.0: + resolution: {integrity: sha512-ZqaKwjjrAYUYfLG+htGaIIZ4nioX2L70ZUMIFysS3xvBsSG4x/n1V6TXV3N8ZYNuFGlDirFg32T7B6WOUPDYcQ==} + dependencies: + '@babel/runtime': 7.24.7 + dev: true + /hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} dependencies: @@ -3584,7 +3817,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.6 + debug: 4.3.5 transitivePeerDependencies: - supports-color dev: true @@ -3603,7 +3836,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.6 + debug: 4.3.5 transitivePeerDependencies: - supports-color dev: true @@ -3636,8 +3869,8 @@ packages: engines: {node: '>= 4'} dev: true - /ignore@5.3.2: - resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + /ignore@5.3.1: + resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} engines: {node: '>= 4'} dev: true @@ -3773,8 +4006,8 @@ packages: ci-info: 2.0.0 dev: true - /is-core-module@2.15.1: - resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} + /is-core-module@2.14.0: + resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} engines: {node: '>= 0.4'} dependencies: hasown: 2.0.2 @@ -3987,8 +4220,9 @@ packages: resolution: {integrity: sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} dev: true - /jackspeak@3.4.3: - resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + /jackspeak@3.4.0: + resolution: {integrity: sha512-JVYhQnN59LVPFCEcVa2C3CrEKYacvjRfqIQl+h8oi91aLYQVWRYbxjPcv1bUiUy/kLmQaANrYfNMCO3kuEDHfw==} + engines: {node: '>=14'} dependencies: '@isaacs/cliui': 8.0.2 optionalDependencies: @@ -4019,12 +4253,19 @@ packages: esprima: 4.0.1 dev: true + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + dev: true + /jsbn@0.1.1: resolution: {integrity: sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==} dev: true - /jsdom@24.1.3: - resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} + /jsdom@24.1.1: + resolution: {integrity: sha512-5O1wWV99Jhq4DV7rCLIoZ/UIhyQeDR7wHVyZAHAshbrvZsLs+Xzz7gtwnlJTJDjleiTKh54F4dXrX70vJQTyJQ==} engines: {node: '>=18'} peerDependencies: canvas: ^2.11.2 @@ -4063,7 +4304,6 @@ packages: resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} 
engines: {node: '>=4'} hasBin: true - dev: false /json-buffer@3.0.0: resolution: {integrity: sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==} @@ -4079,7 +4319,6 @@ packages: /json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} - dev: false /json-parse-helpfulerror@1.0.3: resolution: {integrity: sha512-XgP0FGR77+QhUxjXkwOMkC94k3WtqEBfcnjWqhRd82qTat4SWKRE+9kUnynz/shm3I4ea2+qISvTIeGTNU7kJg==} @@ -4110,7 +4349,7 @@ packages: connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.21.0 + express: 4.19.2 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -4192,7 +4431,7 @@ packages: image-size: 0.5.5 make-dir: 2.1.0 mime: 1.6.0 - native-request: 1.1.2 + native-request: 1.1.0 source-map: 0.6.1 /levn@0.4.1: @@ -4221,7 +4460,7 @@ packages: engines: {node: '>=14'} dependencies: mlly: 1.7.1 - pkg-types: 1.2.0 + pkg-types: 1.1.1 dev: true /locate-path@3.0.0: @@ -4279,6 +4518,12 @@ packages: steno: 0.4.4 dev: true + /lower-case@2.0.2: + resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} + dependencies: + tslib: 2.6.3 + dev: true + /lowercase-keys@1.0.1: resolution: {integrity: sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==} engines: {node: '>=0.10.0'} @@ -4289,8 +4534,9 @@ packages: engines: {node: '>=8'} dev: true - /lru-cache@10.4.3: - resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + /lru-cache@10.2.2: + resolution: {integrity: sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==} + engines: {node: 14 || >=16.14} dev: true /lru-cache@4.1.5: @@ -4300,15 +4546,21 @@ packages: yallist: 2.1.2 dev: true + /lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + dependencies: + yallist: 3.1.1 + dev: true + /lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true dev: true - /magic-string@0.30.11: - resolution: {integrity: sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==} + /magic-string@0.30.10: + resolution: {integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==} dependencies: - '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/sourcemap-codec': 1.4.15 dev: true /make-dir@1.3.0: @@ -4341,8 +4593,8 @@ packages: engines: {node: '>= 0.10.0'} dev: true - /merge-descriptors@1.0.3: - resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} + /merge-descriptors@1.0.1: + resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==} dev: true /merge-stream@2.0.0: @@ -4371,8 +4623,8 @@ packages: engines: {node: '>= 0.6'} dev: true - /micromatch@4.0.8: - resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + /micromatch@4.0.7: + resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} engines: {node: '>=8.6'} dependencies: braces: 3.0.3 @@ -4383,11 +4635,6 @@ packages: 
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} - /mime-db@1.53.0: - resolution: {integrity: sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==} - engines: {node: '>= 0.6'} - dev: true - /mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} @@ -4437,8 +4684,8 @@ packages: brace-expansion: 1.1.11 dev: true - /minimatch@9.0.5: - resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + /minimatch@9.0.4: + resolution: {integrity: sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==} engines: {node: '>=16 || 14 >=14.17'} dependencies: brace-expansion: 2.0.1 @@ -4456,10 +4703,10 @@ packages: /mlly@1.7.1: resolution: {integrity: sha512-rrVRZRELyQzrIUAVMHxP97kv+G786pHmOKzuFII8zDYahFBS7qnHh2AlYSl1GAHhaMPCz6/oHjVMcfFYgFYHgA==} dependencies: - acorn: 8.12.1 + acorn: 8.12.0 pathe: 1.1.2 - pkg-types: 1.2.0 - ufo: 1.5.4 + pkg-types: 1.1.1 + ufo: 1.5.3 dev: true /moment@2.30.1: @@ -4548,8 +4795,8 @@ packages: hasBin: true dev: true - /native-request@1.1.2: - resolution: {integrity: sha512-/etjwrK0J4Ebbcnt35VMWnfiUX/B04uwGJxyJInagxDqf2z5drSt/lsOvEMWGYunz1kaLZAFrV4NDAbOoDKvAQ==} + /native-request@1.1.0: + resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} requiresBuild: true optional: true @@ -4570,6 +4817,13 @@ packages: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} dev: true + /no-case@3.0.4: + resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} + dependencies: + lower-case: 2.0.2 + tslib: 2.6.3 + dev: true + /node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -4582,6 +4836,10 @@ packages: whatwg-url: 5.0.0 dev: true + /node-releases@2.0.14: + resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} + dev: true + /normalize-package-data@2.5.0: resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} dependencies: @@ -4767,7 +5025,7 @@ packages: resolution: {integrity: sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==} engines: {node: '>=18'} dependencies: - yocto-queue: 1.1.1 + yocto-queue: 1.0.0 dev: true /p-locate@3.0.0: @@ -4818,7 +5076,6 @@ packages: error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 - dev: false /parse-ms@2.1.0: resolution: {integrity: sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==} @@ -4872,12 +5129,12 @@ packages: resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} engines: {node: '>=16 || 14 >=14.18'} dependencies: - lru-cache: 10.4.3 + lru-cache: 10.2.2 minipass: 7.1.2 dev: true - /path-to-regexp@0.1.10: - resolution: {integrity: sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==} + /path-to-regexp@0.1.7: + resolution: {integrity: 
sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} dev: true /path-to-regexp@1.8.0: @@ -4942,8 +5199,8 @@ packages: engines: {node: '>= 6'} dev: true - /pkg-types@1.2.0: - resolution: {integrity: sha512-+ifYuSSqOQ8CqP4MbZA5hDpb97n3E8SVWdJe+Wms9kj745lmd3b7EZJiqvmLwAlmRfjrI7Hi5z3kdBJ93lFNPA==} + /pkg-types@1.1.1: + resolution: {integrity: sha512-ko14TjmDuQJ14zsotODv7dBlwxKhUKQEhuhmbqo1uCi9BB0Z2alo/wAXg6q1dTR5TyuqYyWhjtfe/Tsh+X28jQ==} dependencies: confbox: 0.1.7 mlly: 1.7.1 @@ -4966,8 +5223,8 @@ packages: engines: {node: '>= 0.4'} dev: true - /postcss@8.4.41: - resolution: {integrity: sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==} + /postcss@8.4.38: + resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==} engines: {node: ^10 || ^12 || >=14} dependencies: nanoid: 3.3.7 @@ -5079,13 +5336,6 @@ packages: side-channel: 1.0.6 dev: true - /qs@6.13.0: - resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} - engines: {node: '>=0.6'} - dependencies: - side-channel: 1.0.6 - dev: true - /qs@6.5.3: resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} engines: {node: '>=0.6'} @@ -5120,7 +5370,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 dom-align: 1.12.4 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5135,7 +5385,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 array-tree-filter: 2.1.0 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5150,7 +5400,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5162,7 +5412,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5177,7 +5427,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5191,7 +5441,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5204,7 +5454,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5218,7 +5468,7 @@ packages: react: '>= 16.9.0' react-dom: '>= 16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 async-validator: 3.5.2 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5231,7 +5481,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-dialog: 8.5.3(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5245,7 +5495,7 @@ packages: react: 
'>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5258,7 +5508,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) rc-textarea: 0.3.7(react-dom@16.14.0)(react@16.14.0) @@ -5274,7 +5524,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 mini-store: 3.0.6(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5292,7 +5542,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5306,7 +5556,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5320,7 +5570,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5334,7 +5584,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5347,10 +5597,10 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 date-fns: 2.30.0 - dayjs: 1.11.13 + dayjs: 1.11.11 moment: 2.30.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5365,7 +5615,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5378,7 +5628,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5391,7 +5641,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5406,13 +5656,13 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-overflow: 1.3.2(react-dom@16.14.0)(react@16.14.0) rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5424,7 +5674,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-tooltip: 5.0.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5440,7 +5690,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + 
'@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5453,7 +5703,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5467,7 +5717,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5483,7 +5733,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-dropdown: 3.2.5(react-dom@16.14.0)(react@16.14.0) rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) @@ -5499,7 +5749,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5514,7 +5764,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5526,7 +5776,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-select: 12.1.13(react-dom@16.14.0)(react@16.14.0) rc-tree: 4.1.5(react-dom@16.14.0)(react@16.14.0) @@ -5542,11 +5792,11 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5558,7 +5808,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-align: 4.0.15(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5573,7 +5823,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5586,20 +5836,20 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) react-is: 18.3.1 dev: false - /rc-virtual-list@3.14.5(react-dom@16.14.0)(react@16.14.0): - resolution: {integrity: sha512-ZMOnkCLv2wUN8Jz7yI4XiSLa9THlYvf00LuMhb1JlsQCewuU7ydPuHw1rGVPhe9VZYl/5UqODtNd7QKJ2DMGfg==} + /rc-virtual-list@3.14.3(react-dom@16.14.0)(react@16.14.0): + resolution: {integrity: sha512-6+6wiEhdqakNBnbRJymgMlh+90qpkgqherTRo1l1cX7mK6F9hWsazPczmP0lA+64yhC9/t+M9Dh5pjvDWimn8A==} engines: {node: '>=8.x'} peerDependencies: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5647,12 +5897,17 @@ packages: /react-is@18.3.1: resolution: {integrity: 
sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + /react-refresh@0.14.2: + resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} + engines: {node: '>=0.10.0'} + dev: true + /react-router-dom@5.3.4(react@16.14.0): resolution: {integrity: sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==} peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 history: 4.10.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5667,7 +5922,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 history: 4.10.1 hoist-non-react-statics: 3.3.2 loose-envify: 1.4.0 @@ -5685,7 +5940,7 @@ packages: react: ^16.8.0 || ^17.0.0 react-dom: ^16.8.0 || ^17.0.0 dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 '@emotion/cache': 10.0.29 '@emotion/core': 10.3.1(react@16.14.0) '@emotion/css': 10.0.27 @@ -5705,7 +5960,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.25.6 + '@babel/runtime': 7.24.7 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5802,7 +6057,7 @@ packages: deprecated: request has been deprecated, see https://github.com/request/request/issues/3142 dependencies: aws-sign2: 0.7.0 - aws4: 1.13.2 + aws4: 1.13.0 caseless: 0.12.0 combined-stream: 1.0.8 extend: 3.0.2 @@ -5857,7 +6112,7 @@ packages: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true dependencies: - is-core-module: 2.15.1 + is-core-module: 2.14.0 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 @@ -5888,37 +6143,37 @@ packages: glob: 7.2.3 dev: true - /rollup@3.29.5: - resolution: {integrity: sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w==} + /rollup@3.29.4: + resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} engines: {node: '>=14.18.0', npm: '>=8.0.0'} hasBin: true optionalDependencies: fsevents: 2.3.3 dev: true - /rollup@4.21.1: - resolution: {integrity: sha512-ZnYyKvscThhgd3M5+Qt3pmhO4jIRR5RGzaSovB6Q7rGNrK5cUncrtLmcTTJVSdcKXyZjW8X8MB0JMSuH9bcAJg==} + /rollup@4.18.0: + resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true dependencies: '@types/estree': 1.0.5 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.21.1 - '@rollup/rollup-android-arm64': 4.21.1 - '@rollup/rollup-darwin-arm64': 4.21.1 - '@rollup/rollup-darwin-x64': 4.21.1 - '@rollup/rollup-linux-arm-gnueabihf': 4.21.1 - '@rollup/rollup-linux-arm-musleabihf': 4.21.1 - '@rollup/rollup-linux-arm64-gnu': 4.21.1 - '@rollup/rollup-linux-arm64-musl': 4.21.1 - '@rollup/rollup-linux-powerpc64le-gnu': 4.21.1 - '@rollup/rollup-linux-riscv64-gnu': 4.21.1 - '@rollup/rollup-linux-s390x-gnu': 4.21.1 - '@rollup/rollup-linux-x64-gnu': 4.21.1 - '@rollup/rollup-linux-x64-musl': 4.21.1 - '@rollup/rollup-win32-arm64-msvc': 4.21.1 - '@rollup/rollup-win32-ia32-msvc': 4.21.1 - '@rollup/rollup-win32-x64-msvc': 4.21.1 + '@rollup/rollup-android-arm-eabi': 4.18.0 + '@rollup/rollup-android-arm64': 4.18.0 + '@rollup/rollup-darwin-arm64': 4.18.0 + '@rollup/rollup-darwin-x64': 4.18.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.18.0 + 
'@rollup/rollup-linux-arm-musleabihf': 4.18.0 + '@rollup/rollup-linux-arm64-gnu': 4.18.0 + '@rollup/rollup-linux-arm64-musl': 4.18.0 + '@rollup/rollup-linux-powerpc64le-gnu': 4.18.0 + '@rollup/rollup-linux-riscv64-gnu': 4.18.0 + '@rollup/rollup-linux-s390x-gnu': 4.18.0 + '@rollup/rollup-linux-x64-gnu': 4.18.0 + '@rollup/rollup-linux-x64-musl': 4.18.0 + '@rollup/rollup-win32-arm64-msvc': 4.18.0 + '@rollup/rollup-win32-ia32-msvc': 4.18.0 + '@rollup/rollup-win32-x64-msvc': 4.18.0 fsevents: 2.3.3 dev: true @@ -5944,7 +6199,7 @@ packages: /rxjs@7.8.1: resolution: {integrity: sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==} dependencies: - tslib: 2.7.0 + tslib: 2.6.3 dev: true /safe-array-concat@1.1.2: @@ -6017,14 +6272,14 @@ packages: hasBin: true dev: true - /semver@7.6.3: - resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} + /semver@7.6.2: + resolution: {integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==} engines: {node: '>=10'} hasBin: true dev: true - /send@0.19.0: - resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} + /send@0.18.0: + resolution: {integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} engines: {node: '>= 0.8.0'} dependencies: debug: 2.6.9 @@ -6044,14 +6299,14 @@ packages: - supports-color dev: true - /serve-static@1.16.2: - resolution: {integrity: sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} + /serve-static@1.15.0: + resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==} engines: {node: '>= 0.8.0'} dependencies: - encodeurl: 2.0.0 + encodeurl: 1.0.2 escape-html: 1.0.3 parseurl: 1.3.3 - send: 0.19.0 + send: 0.18.0 transitivePeerDependencies: - supports-color dev: true @@ -6064,8 +6319,8 @@ packages: resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} dev: true - /set-cookie-parser@2.7.0: - resolution: {integrity: sha512-lXLOiqpkUumhRdFF3k1osNXCy9akgx/dyPZ5p8qAg9seJzXr5ZrlqZuWIMuY6ejOsVLE6flJ5/h3lsn57fQ/PQ==} + /set-cookie-parser@2.6.0: + resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} dev: true /set-function-length@1.2.2: @@ -6163,6 +6418,13 @@ packages: is-fullwidth-code-point: 3.0.0 dev: true + /snake-case@3.0.4: + resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} + dependencies: + dot-case: 3.0.4 + tslib: 2.6.3 + dev: true + /source-map-js@1.2.0: resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} engines: {node: '>=0.10.0'} @@ -6183,7 +6445,7 @@ packages: resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} dependencies: spdx-expression-parse: 3.0.1 - spdx-license-ids: 3.0.20 + spdx-license-ids: 3.0.18 dev: true /spdx-exceptions@2.5.0: @@ -6194,11 +6456,11 @@ packages: resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} dependencies: spdx-exceptions: 2.5.0 - spdx-license-ids: 3.0.20 + spdx-license-ids: 3.0.18 dev: true - /spdx-license-ids@3.0.20: - 
resolution: {integrity: sha512-jg25NiDV/1fLtSgEgyvVyDunvaNHbuwF9lfNV17gSmPFAlYzdfNBlLtLzXTevwkPj7DhGbmN9VnmJIgLnhvaBw==} + /spdx-license-ids@3.0.18: + resolution: {integrity: sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==} dev: true /sprintf-js@1.0.3: @@ -6412,7 +6674,7 @@ packages: dependencies: '@jridgewell/gen-mapping': 0.3.5 commander: 4.1.1 - glob: 10.4.5 + glob: 10.4.2 lines-and-columns: 1.2.4 mz: 2.7.0 pirates: 4.0.6 @@ -6436,6 +6698,10 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} + /svg-parser@2.0.4: + resolution: {integrity: sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==} + dev: true + /symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} dev: true @@ -6444,7 +6710,7 @@ packages: resolution: {integrity: sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==} engines: {node: '>=10.0.0'} dependencies: - ajv: 8.17.1 + ajv: 8.16.0 lodash.truncate: 4.4.2 slice-ansi: 4.0.0 string-width: 4.2.3 @@ -6487,8 +6753,8 @@ packages: resolution: {integrity: sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==} dev: false - /tinybench@2.9.0: - resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + /tinybench@2.8.0: + resolution: {integrity: sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==} dev: true /tinypool@0.8.4: @@ -6511,7 +6777,6 @@ packages: /to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} - dev: false /to-readable-stream@1.0.0: resolution: {integrity: sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==} @@ -6583,8 +6848,8 @@ packages: resolution: {integrity: sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==} dev: false - /tslib@2.7.0: - resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} + /tslib@2.6.3: + resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} dev: true /tsutils@3.21.0(typescript@4.9.5): @@ -6614,8 +6879,8 @@ packages: prelude-ls: 1.2.1 dev: true - /type-detect@4.1.0: - resolution: {integrity: sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==} + /type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} engines: {node: '>=4'} dev: true @@ -6696,8 +6961,8 @@ packages: engines: {node: '>=4.2.0'} hasBin: true - /ufo@1.5.4: - resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} + /ufo@1.5.3: + resolution: {integrity: sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==} dev: true /unbox-primitive@1.0.2: @@ -6709,8 +6974,8 @@ packages: which-boxed-primitive: 1.0.2 dev: true - /undici-types@6.19.8: - resolution: {integrity: 
sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} + /undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} dev: true /unique-string@1.0.0: @@ -6730,6 +6995,17 @@ packages: engines: {node: '>= 0.8'} dev: true + /update-browserslist-db@1.0.16(browserslist@4.23.1): + resolution: {integrity: sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + dependencies: + browserslist: 4.23.1 + escalade: 3.1.2 + picocolors: 1.0.1 + dev: true + /update-notifier@3.0.1: resolution: {integrity: sha512-grrmrB6Zb8DUiyDIaeRTBCkgISYUgETNe7NglEbVsrLWXeESnlCSP50WfRSj/GmzMPl6Uchj24S/p80nP/ZQrQ==} engines: {node: '>=8'} @@ -6828,38 +7104,52 @@ packages: hasBin: true dependencies: cac: 6.7.14 - debug: 4.3.6 + debug: 4.3.5 pathe: 1.1.2 picocolors: 1.0.1 - vite: 5.4.2(less@3.13.1) + vite: 5.3.1(less@3.13.1) transitivePeerDependencies: - '@types/node' - less - lightningcss - sass - - sass-embedded - stylus - sugarss - supports-color - terser dev: true - /vite-tsconfig-paths@3.6.0(vite@4.5.5): + /vite-plugin-svgr@4.2.0(typescript@4.9.5)(vite@4.5.3): + resolution: {integrity: sha512-SC7+FfVtNQk7So0XMjrrtLAbEC8qjFPifyD7+fs/E6aaNdVde6umlVVh0QuwDLdOMu7vp5RiGFsB70nj5yo0XA==} + peerDependencies: + vite: ^2.6.0 || 3 || 4 || 5 + dependencies: + '@rollup/pluginutils': 5.1.0 + '@svgr/core': 8.1.0(typescript@4.9.5) + '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0) + vite: 4.5.3(less@3.13.1) + transitivePeerDependencies: + - rollup + - supports-color + - typescript + dev: true + + /vite-tsconfig-paths@3.6.0(vite@4.5.3): resolution: {integrity: sha512-UfsPYonxLqPD633X8cWcPFVuYzx/CMNHAjZTasYwX69sXpa4gNmQkR0XCjj82h7zhLGdTWagMjC1qfb9S+zv0A==} peerDependencies: vite: '>2.0.0-0' dependencies: - debug: 4.3.6 + debug: 4.3.5 globrex: 0.1.2 recrawl-sync: 2.2.3 tsconfig-paths: 4.2.0 - vite: 4.5.5(less@3.13.1) + vite: 4.5.3(less@3.13.1) transitivePeerDependencies: - supports-color dev: true - /vite@4.5.5(less@3.13.1): - resolution: {integrity: sha512-ifW3Lb2sMdX+WU91s3R0FyQlAyLxOzCSCP37ujw0+r5POeHPwe6udWVIElKQq8gk3t7b8rkmvqC6IHBpCff4GQ==} + /vite@4.5.3(less@3.13.1): + resolution: {integrity: sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==} engines: {node: ^14.18.0 || >=16.0.0} hasBin: true peerDependencies: @@ -6888,14 +7178,14 @@ packages: dependencies: esbuild: 0.18.20 less: 3.13.1 - postcss: 8.4.41 - rollup: 3.29.5 + postcss: 8.4.38 + rollup: 3.29.4 optionalDependencies: fsevents: 2.3.3 dev: true - /vite@5.4.2(less@3.13.1): - resolution: {integrity: sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==} + /vite@5.3.1(less@3.13.1): + resolution: {integrity: sha512-XBmSKRLXLxiaPYamLv3/hnP/KXDai1NDexN0FpkTaZXTfycHvkRHoenpgl/fvuK/kPbB6xAgoyiryAhQNxYmAQ==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -6903,7 +7193,6 @@ packages: less: '*' lightningcss: ^1.21.0 sass: '*' - sass-embedded: '*' stylus: '*' sugarss: '*' terser: ^5.4.0 @@ -6916,8 +7205,6 @@ packages: optional: true sass: optional: true - sass-embedded: - optional: true stylus: optional: true sugarss: @@ -6927,13 +7214,13 @@ packages: dependencies: esbuild: 0.21.5 less: 3.13.1 - postcss: 8.4.41 - rollup: 4.21.1 + postcss: 8.4.38 + rollup: 4.18.0 optionalDependencies: fsevents: 2.3.3 dev: true - /vitest@1.6.0(jsdom@24.1.3)(less@3.13.1): + 
/vitest@1.6.0(jsdom@24.1.1)(less@3.13.1): resolution: {integrity: sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -6964,26 +7251,25 @@ packages: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 acorn-walk: 8.3.3 - chai: 4.5.0 - debug: 4.3.6 + chai: 4.4.1 + debug: 4.3.5 execa: 8.0.1 - jsdom: 24.1.3 + jsdom: 24.1.1 local-pkg: 0.5.0 - magic-string: 0.30.11 + magic-string: 0.30.10 pathe: 1.1.2 picocolors: 1.0.1 std-env: 3.7.0 strip-literal: 2.1.0 - tinybench: 2.9.0 + tinybench: 2.8.0 tinypool: 0.8.4 - vite: 5.4.2(less@3.13.1) + vite: 5.3.1(less@3.13.1) vite-node: 1.6.0(less@3.13.1) - why-is-node-running: 2.3.0 + why-is-node-running: 2.2.2 transitivePeerDependencies: - less - lightningcss - sass - - sass-embedded - stylus - sugarss - supports-color @@ -7103,8 +7389,8 @@ packages: isexe: 2.0.0 dev: true - /why-is-node-running@2.3.0: - resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + /why-is-node-running@2.2.2: + resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} engines: {node: '>=8'} hasBin: true dependencies: @@ -7212,6 +7498,10 @@ packages: resolution: {integrity: sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==} dev: true + /yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + dev: true + /yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} @@ -7258,13 +7548,13 @@ packages: yargs-parser: 21.1.1 dev: true - /yocto-queue@1.1.1: - resolution: {integrity: sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==} + /yocto-queue@1.0.0: + resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} engines: {node: '>=12.20'} dev: true - /zrender@5.6.0: - resolution: {integrity: sha512-uzgraf4njmmHAbEUxMJ8Oxg+P3fT04O+9p7gY+wJRVxo8Ge+KmYv0WJev945EH4wFuc4OY2NLXz46FZrWS9xJg==} + /zrender@5.5.0: + resolution: {integrity: sha512-O3MilSi/9mwoovx77m6ROZM7sXShR/O/JIanvzTwjN3FORfLSr81PsUGd7jlaYOeds9d8tw82oP44+3YucVo+w==} dependencies: tslib: 2.3.0 dev: false diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx index 78954ebb5a5..0ad6aa3f174 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx @@ -20,7 +20,6 @@ import React, { Suspense } from 'react'; import { Switch as AntDSwitch, Layout } from 'antd'; import NavBar from './components/navBar/navBar'; -import NavBarV2 from '@/v2/components/navBar/navBar'; import Breadcrumbs from './components/breadcrumbs/breadcrumbs'; import { HashRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; import { routes } from '@/routes'; @@ -62,11 +61,7 @@ class App extends React.Component, IAppState> { return ( - { - (enableNewUI) - ? - : - } +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index 6b2bab246b7..0230d4dd61d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -76,11 +76,10 @@ class AutoReloadPanel extends React.Component { ); const lastUpdatedDeltaFullText = lastUpdatedOMDBDelta === 0 || lastUpdatedOMDBDelta === undefined || lastUpdatedOMDBFull === 0 || lastUpdatedOMDBFull === undefined ? '' : - //omSyncLoad should be clickable at all times. If the response from the dbsync is false it will show DB update is already running else show triggered sync ( <>   | DB Synced at {lastUpdatedDeltaFullToolTip} -  )} - - ); - }); - breadCrumbs[breadCrumbs.length - 1] = generateSubMenu(currPath[currPath.length - 1]); - return breadCrumbs; - } - - return ( - } - className='breadcrumb-nav'> - {generateBreadCrumbs()} - - ) -} - -export default DUBreadcrumbNav; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx deleted file mode 100644 index f2c740f7dbc..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import React, { useRef, useState } from 'react'; -import moment from 'moment'; -import { AxiosError } from 'axios'; -import { Table } from 'antd'; - -import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; -import { byteToSize, showDataFetchError } from '@/utils/common'; - -import { Acl } from '@/v2/types/acl.types'; - - -// ------------- Types -------------- // -type CountStats = { - numBucket: number; - numDir: number; - numKey: number; - numVolume: number; -}; - -type LocationInfo = { - blockID: { - containerBlockID: { - containerID: number; - localID: number; - }; - blockCommitSequenceId: number; - containerID: number; - localID: number; - }; - length: number; - offset: number; - token: null; - createVersion: number; - pipeline: null; - partNumber: number; - containerID: number; - localID: number; - blockCommitSequenceId: number; -}; - -type ObjectInfo = { - bucketName: string; - bucketLayout: string; - encInfo: null; - fileName: string; - keyName: string; - name: string; - owner: string; - volume: string; - volumeName: string; - sourceVolume: string | null; - sourceBucket: string | null; - usedBytes: number | null; - usedNamespace: number; - storageType: string; - creationTime: number; - dataSize: number; - modificationTime: number; - quotaInBytes: number; - quotaInNamespace: number; -} - -type ReplicationConfig = { - replicationFactor: string; - requiredNodes: number; - replicationType: string; -} - -type ObjectInfoResponse = ObjectInfo & { - acls: Acl[]; - versioningEnabled: boolean; - metadata: Record; - file: boolean; - keyLocationVersions: { - version: number; - locationList: LocationInfo[]; - multipartKey: boolean; - blocksLatestVersionOnly: LocationInfo[]; - locationLists: LocationInfo[][]; - locationListCount: number; - }[]; - versioning: boolean; - encryptionInfo: null; - replicationConfig: ReplicationConfig; -}; - -type SummaryResponse = { - countStats: CountStats; - objectInfo: ObjectInfoResponse; - path: string; - status: string; - type: string; -} - -type MetadataProps = { - path: string; -}; - -type MetadataState = { - keys: string[]; - values: (string | number | boolean | null)[]; -}; - - -// ------------- Component -------------- // -const DUMetadata: React.FC = ({ - path = '/' -}) => { - const [loading, setLoading] = useState(false); - const [state, setState] = useState({ - keys: [], - values: [] - }); - const cancelSummarySignal = useRef(); - const keyMetadataSummarySignal = useRef(); - const cancelQuotaSignal = useRef(); - - const getObjectInfoMapping = React.useCallback((summaryResponse) => { - - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; - /** - * We are creating a specific set of keys under Object Info response - * which do not require us to modify anything - */ - const selectedInfoKeys = [ - 'bucketName', 'bucketLayout', 'encInfo', 'fileName', 'keyName', - 'name', 'owner', 'sourceBucket', 'sourceVolume', 'storageType', - 'usedNamespace', 'volumeName', 'volume' - ] as const; - const objectInfo: ObjectInfo = summaryResponse.objectInfo ?? 
{}; - - selectedInfoKeys.forEach((key) => { - if (objectInfo[key as keyof ObjectInfo] !== undefined && objectInfo[key as keyof ObjectInfo] !== -1) { - // We will use regex to convert the Object key from camel case to space separated title - // The following regex will match abcDef and produce Abc Def - let keyName = key.replace(/([a-z0-9])([A-Z])/g, '$1 $2'); - keyName = keyName.charAt(0).toUpperCase() + keyName.slice(1); - keys.push(keyName); - values.push(objectInfo[key as keyof ObjectInfo]); - } - }); - - if (objectInfo?.creationTime !== undefined && objectInfo?.creationTime !== -1) { - keys.push('Creation Time'); - values.push(moment(objectInfo.creationTime).format('ll LTS')); - } - - if (objectInfo?.usedBytes !== undefined && objectInfo?.usedBytes !== -1 && objectInfo!.usedBytes !== null) { - keys.push('Used Bytes'); - values.push(byteToSize(objectInfo.usedBytes, 3)); - } - - if (objectInfo?.dataSize !== undefined && objectInfo?.dataSize !== -1) { - keys.push('Data Size'); - values.push(byteToSize(objectInfo.dataSize, 3)); - } - - if (objectInfo?.modificationTime !== undefined && objectInfo?.modificationTime !== -1) { - keys.push('Modification Time'); - values.push(moment(objectInfo.modificationTime).format('ll LTS')); - } - - if (objectInfo?.quotaInBytes !== undefined && objectInfo?.quotaInBytes !== -1) { - keys.push('Quota In Bytes'); - values.push(byteToSize(objectInfo.quotaInBytes, 3)); - } - - if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { - keys.push('Quota In Namespace'); - values.push(byteToSize(objectInfo.quotaInNamespace, 3)); - } - - if (summaryResponse.objectInfo?.replicationConfig?.replicationFactor !== undefined) { - keys.push('Replication Factor'); - values.push(summaryResponse.objectInfo.replicationConfig.replicationFactor); - } - - if (summaryResponse.objectInfo?.replicationConfig?.replicationType !== undefined) { - keys.push('Replication Type'); - values.push(summaryResponse.objectInfo.replicationConfig.replicationType); - } - - if (summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== undefined - && summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== -1) { - keys.push('Replication Required Nodes'); - values.push(summaryResponse.objectInfo.replicationConfig.requiredNodes); - } - - return { keys, values } - }, [path]); - - function loadMetadataSummary(path: string) { - cancelRequests([ - cancelSummarySignal.current!, - keyMetadataSummarySignal.current! - ]); - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; - - const { request, controller } = AxiosGetHelper( - `/api/v1/namespace/summary?path=${path}`, - cancelSummarySignal.current - ); - cancelSummarySignal.current = controller; - - request.then(response => { - const summaryResponse: SummaryResponse = response.data; - keys.push('Entity Type'); - values.push(summaryResponse.type); - - if (summaryResponse.status === 'INITIALIZING') { - showDataFetchError(`The metadata is currently initializing. 
Please wait a moment and try again later`); - return; - } - - if (summaryResponse.status === 'PATH_NOT_FOUND') { - showDataFetchError(`Invalid Path: ${path}`); - return; - } - - // If the entity is a Key then fetch the Key metadata only - if (summaryResponse.type === 'KEY') { - const { request: metadataRequest, controller: metadataNewController } = AxiosGetHelper( - `/api/v1/namespace/du?path=${path}&replica=true`, - keyMetadataSummarySignal.current - ); - keyMetadataSummarySignal.current = metadataNewController; - metadataRequest.then(response => { - keys.push('File Size'); - values.push(byteToSize(response.data.size, 3)); - keys.push('File Size With Replication'); - values.push(byteToSize(response.data.sizeWithReplica, 3)); - keys.push("Creation Time"); - values.push(moment(summaryResponse.objectInfo.creationTime).format('ll LTS')); - keys.push("Modification Time"); - values.push(moment(summaryResponse.objectInfo.modificationTime).format('ll LTS')); - - setState({ - keys: keys, - values: values - }); - }).catch(error => { - showDataFetchError(error.toString()); - }); - return; - } - - /** - * Will iterate over the keys of the countStats to avoid multiple if blocks - * and check from the map for the respective key name / title to insert - */ - const countStats: CountStats = summaryResponse.countStats ?? {}; - const keyToNameMap: Record = { - numVolume: 'Volumes', - numBucket: 'Buckets', - numDir: 'Total Directories', - numKey: 'Total Keys' - } - Object.keys(countStats).forEach((key: string) => { - if (countStats[key as keyof CountStats] !== undefined - && countStats[key as keyof CountStats] !== -1) { - keys.push(keyToNameMap[key]); - values.push(countStats[key as keyof CountStats]); - } - }) - - const { - keys: objectInfoKeys, - values: objectInfoValues - } = getObjectInfoMapping(summaryResponse); - - keys.push(...objectInfoKeys); - values.push(...objectInfoValues); - - setState({ - keys: keys, - values: values - }); - }).catch(error => { - showDataFetchError((error as AxiosError).toString()); - }); - } - - function loadQuotaSummary(path: string) { - cancelRequests([ - cancelQuotaSignal.current! 
- ]); - - const { request, controller } = AxiosGetHelper( - `/api/v1/namespace/quota?path=${path}`, - cancelQuotaSignal.current - ); - cancelQuotaSignal.current = controller; - - request.then(response => { - const quotaResponse = response.data; - - if (quotaResponse.status === 'INITIALIZING') { - return; - } - if (quotaResponse.status === 'TYPE_NOT_APPLICABLE') { - return; - } - if (quotaResponse.status === 'PATH_NOT_FOUND') { - showDataFetchError(`Invalid Path: ${path}`); - return; - } - - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; - // Append quota information - // In case the object's quota isn't set - if (quotaResponse.allowed !== undefined && quotaResponse.allowed !== -1) { - keys.push('Quota Allowed'); - values.push(byteToSize(quotaResponse.allowed, 3)); - } - - if (quotaResponse.used !== undefined && quotaResponse.used !== -1) { - keys.push('Quota Used'); - values.push(byteToSize(quotaResponse.used, 3)); - } - setState((prevState) => ({ - keys: [...prevState.keys, ...keys], - values: [...prevState.values, ...values] - })); - }).catch(error => { - showDataFetchError(error.toString()); - }); - } - - React.useEffect(() => { - setLoading(true); - loadMetadataSummary(path); - loadQuotaSummary(path); - setLoading(false); - - return (() => { - cancelRequests([ - cancelSummarySignal.current!, - keyMetadataSummarySignal.current!, - cancelQuotaSignal.current! - ]); - }) - }, [path]); - - const content = []; - for (const [i, v] of state.keys.entries()) { - content.push({ - key: v, - value: state.values[i] - }); - } - - return ( -
    - - -
    - ); -} - -export default DUMetadata; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx deleted file mode 100644 index 2601905a142..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -import EChart from '@/v2/components/eChart/eChart'; -import { byteToSize } from '@/utils/common'; -import { DUSubpath } from '@/v2/types/diskUsage.types'; - -//-------Types--------// -type PieChartProps = { - path: string; - limit: number; - size: number; - subPaths: DUSubpath[]; - subPathCount: number; - sizeWithReplica: number; - loading: boolean; -} - -//-------Constants---------// -const OTHER_PATH_NAME = 'Other Objects'; -const MIN_BLOCK_SIZE = 0.05; - - -//----------Component---------// -const DUPieChart: React.FC = ({ - path, - limit, - size, - subPaths, - subPathCount, - sizeWithReplica, - loading -}) => { - - const [subpathSize, setSubpathSize] = React.useState(0); - - function getSubpathSize(subpaths: DUSubpath[]): number { - const subpathSize = subpaths - .map((subpath) => subpath.size) - .reduce((acc, curr) => acc + curr, 0); - // If there is no subpaths, then the size will be total size of path - return (subPaths.length === 0) ? size : subpathSize; - } - - function updatePieData() { - /** - * We need to calculate the size of "Other objects" in two cases: - * - * 1) If we have more subpaths listed, than the limit. - * 2) If the limit is set to the maximum limit (30) and we have any number of subpaths. - * In this case we won't necessarily have "Other objects", but we check if the - * other objects's size is more than zero (we will have other objects if there are more than 30 subpaths, - * but we can't check on that, as the response will always have - * 30 subpaths, but from the total size and the subpaths size we can calculate it). - */ - let subpaths: DUSubpath[] = subPaths; - - let pathLabels: string[] = []; - let percentage: string[] = []; - let sizeStr: string[]; - let valuesWithMinBlockSize: number[] = []; - - if (subPathCount > limit) { - // If the subpath count is greater than the provided limit - // Slice the subpath to the limit - subpaths = subpaths.slice(0, limit); - // Add the size of the subpath - const limitedSize = getSubpathSize(subpaths); - const remainingSize = size - limitedSize; - subpaths.push({ - path: OTHER_PATH_NAME, - size: remainingSize, - sizeWithReplica: (sizeWithReplica === -1) - ? 
-1 - : sizeWithReplica - remainingSize, - isKey: false - }) - } - - if (subPathCount === 0 || subpaths.length === 0) { - // No more subpaths available - pathLabels = [path.split('/').pop() ?? '']; - valuesWithMinBlockSize = [0.1]; - percentage = ['100.00']; - sizeStr = [byteToSize(size, 1)]; - } else { - pathLabels = subpaths.map(subpath => { - const subpathName = subpath.path.split('/').pop() ?? ''; - // Diferentiate keys by removing trailing slash - return (subpath.isKey || subpathName === OTHER_PATH_NAME) - ? subpathName - : subpathName + '/'; - }); - - let values: number[] = [0]; - if (size > 0) { - values = subpaths.map( - subpath => (subpath.size / size) - ); - } - const valueClone = structuredClone(values); - valuesWithMinBlockSize = valueClone?.map( - (val: number) => (val > 0) - ? val + MIN_BLOCK_SIZE - : val - ); - - percentage = values.map(value => (value * 100).toFixed(2)); - sizeStr = subpaths.map((subpath) => byteToSize(subpath.size, 1)); - } - - return valuesWithMinBlockSize.map((key, idx) => { - return { - value: key, - name: pathLabels[idx], - size: sizeStr[idx], - percentage: percentage[idx] - } - }); - } - - React.useEffect(() => { - setSubpathSize(getSubpathSize(subPaths)); - }, [subPaths, limit]); - - const pieData = React.useMemo(() => updatePieData(), [path, subPaths, limit]); - - const eChartsOptions = { - title: { - text: `${byteToSize(subpathSize, 1)} / ${byteToSize(size, 1)}`, - left: 'center', - top: '95%' - }, - tooltip: { - trigger: 'item', - formatter: ({ dataIndex, name, color }) => { - const nameEl = `${name}
    `; - const dataEl = `Total Data Size: ${pieData[dataIndex]['size']}
    ` - const percentageEl = `Percentage: ${pieData[dataIndex]['percentage']} %` - return `${nameEl}${dataEl}${percentageEl}` - } - }, - legend: { - top: '10%', - orient: 'vertical', - left: '0%', - width: '80%' - }, - grid: { - - }, - series: [ - { - type: 'pie', - radius: '70%', - data: pieData.map((value) => { - return { - value: value.value, - name: value.name - } - }), - emphasis: { - itemStyle: { - shadowBlur: 10, - shadowOffsetX: 0, - shadowColor: 'rgba(0, 0, 0, 0.5)' - } - } - } - ] - }; - - const handleLegendChange = ({selected}: {selected: Record}) => { - const filteredPath = subPaths.filter((value) => { - // In case of any leading '/' remove them and add a / at end - // to make it similar to legend - const splitPath = value.path?.split('/'); - const pathName = splitPath[splitPath.length - 1] ?? '' + ((value.isKey) ? '' : '/'); - return selected[pathName]; - }) - const newSize = getSubpathSize(filteredPath); - setSubpathSize(newSize); - } - - return ( - - ); -} - -export default DUPieChart; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx index 9d483efd6b0..79fa0760338 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx @@ -28,10 +28,6 @@ export interface EChartProps { loading?: boolean; theme?: 'light'; onClick?: () => any | void; - eventHandler?: { - name: string, - handler: (arg0: any) => void - }; } const EChart = ({ @@ -40,8 +36,7 @@ const EChart = ({ settings, loading, theme, - onClick, - eventHandler + onClick }: EChartProps): JSX.Element => { const chartRef = useRef(null); useEffect(() => { @@ -52,10 +47,6 @@ const EChart = ({ if (onClick) { chart.on('click', onClick); } - - if (eventHandler) { - chart.on(eventHandler.name, eventHandler.handler); - } } // Add chart resize listener @@ -80,10 +71,6 @@ const EChart = ({ if (onClick) { chart!.on('click', onClick); } - - if (eventHandler) { - chart!.on(eventHandler.name, eventHandler.handler); - } } }, [option, settings, theme]); // Whenever theme changes we need to add option and setting due to it being deleted in cleanup function diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less deleted file mode 100644 index 09ec283d555..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less +++ /dev/null @@ -1,65 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -.logo-v2 { - color: #FFFFFF; - font-size: 18px; - font-weight: 500; - padding: 20px; - background-color: #142329; - .logo-text-v2 { - margin-left: 10px; - } -} - -.ant-layout-sider-collapsed { - .logo-v2 { - padding: 10px; - - .logo-text-v2 { - display: none; - } - } - .ant-layout-sider-trigger { - background: #142329 !important; - text-align: center !important; - padding-left: 20px !important; - } -} - -.ant-layout-sider { - background: #142329 !important; - - .ant-menu-dark { - background: #142329 !important; - - .ant-menu-item-selected { - span { - color: #4DCF4C !important; - } - background: #224452 !important; - color: #4DCF4C !important; - } - } - - .ant-layout-sider-trigger { - background: #142329 !important; - text-align: unset !important; - padding-left: 25px; - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx deleted file mode 100644 index 3da4104634c..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import React, { useState, useEffect, useRef } from 'react'; -import axios, { AxiosResponse } from 'axios'; -import { Layout, Menu, Spin } from 'antd'; -import { - BarChartOutlined, - ClusterOutlined, - ContainerOutlined, - DashboardOutlined, - DatabaseOutlined, - DeploymentUnitOutlined, - FolderOpenOutlined, - InboxOutlined, - LayoutOutlined, - PieChartOutlined -} from '@ant-design/icons'; -import { useLocation, Link } from 'react-router-dom'; - - -import logo from '@/logo.png'; -import { showDataFetchError } from '@/utils/common'; -import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; - -import './navBar.less'; - - -// ------------- Types -------------- // -type NavBarProps = { - collapsed: boolean; - onCollapse: (arg0: boolean) => void; -} - -const NavBar: React.FC = ({ - collapsed = false, - onCollapse = () => { } -}) => { - const [isHeatmapEnabled, setIsHeatmapEnabled] = useState(false); - const cancelDisabledFeatureSignal = useRef(); - const location = useLocation(); - - const fetchDisabledFeatures = async () => { - const disabledfeaturesEndpoint = `/api/v1/features/disabledFeatures`; - const { request, controller } = AxiosGetHelper( - disabledfeaturesEndpoint, - cancelDisabledFeatureSignal.current - ) - cancelDisabledFeatureSignal.current = controller; - try { - const response: AxiosResponse = await request; - const heatmapDisabled = response?.data?.includes('HEATMAP') - setIsHeatmapEnabled(!heatmapDisabled); - } catch (error: unknown) { - showDataFetchError((error as Error).toString()) - } - } - - - useEffect(() => { - fetchDisabledFeatures(); - // Component will unmount - return (() => { - cancelRequests([cancelDisabledFeatureSignal.current!]) - }) - }, []) - - const menuItems = [( - }> - Overview - - - ), ( - }> - Volumes - - - ), ( - }> - Buckets - - - ), ( - }> - Datanodes - - - ), ( - }> - Pipelines - - - ), ( - }> - Containers - - - ), ( - }> - }> - Insights - - - }> - OM DB Insights - - - - ), ( - }> - Disk Usage - - - ), ( - isHeatmapEnabled && - }> - Heatmap - - - )] - return ( - -
    - Ozone Recon Logo - Ozone Recon -
    - - {...menuItems} - -
    - ); -} - -export default NavBar; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx index d320fd659a6..21d4341787e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx @@ -20,11 +20,9 @@ import React from 'react'; import { Input, Select } from 'antd'; import { Option } from '@/v2/components/select/singleSelect'; -import { DownOutlined } from '@ant-design/icons'; // ------------- Types -------------- // type SearchProps = { - disabled?: boolean; searchColumn?: string; searchInput: string; searchOptions?: Option[]; @@ -41,7 +39,6 @@ type SearchProps = { // ------------- Component -------------- // const Search: React.FC = ({ - disabled = false, searchColumn, searchInput = '', searchOptions = [], @@ -51,8 +48,6 @@ const Search: React.FC = ({ const selectFilter = searchColumn ? ( { } // ------------- Component -------------- // - -const Option: React.FC> = (props) => { - return ( -
    - - null} /> - - -
    - ) -} - - const MultiSelect: React.FC = ({ options = [], selected = [], @@ -80,20 +58,24 @@ const MultiSelect: React.FC = ({ ...props }) => { - const ValueContainer = ({ children, ...props }: ValueContainerProps) => { + const Option: React.FC> = (props) => { return ( - - {React.Children.map(children, (child) => ( - ((child as React.ReactElement> - | React.ReactPortal)?.type as React.JSXElementConstructor)).name === "DummyInput" - ? child - : null - )} - {placeholder}: {selected.length} selected - - ); - }; +
    + + null} /> + + +
    + ) + } return ( = ({ classNamePrefix='multi-select' options={options} components={{ - ValueContainer, Option }} placeholder={placeholder} value={selected} - isOptionDisabled={(option) => option.value === fixedColumn} onChange={(selected: ValueType) => { if (selected?.length === options.length) return onChange!(options); return onChange!(selected); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx index 1d02b407334..41ab03f5982 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx @@ -50,7 +50,7 @@ const SingleSelect: React.FC = ({ const ValueContainer = ({ children, ...props }: ValueContainerProps) => { - const selectedValue = props.getValue() as Option[]; + const selectedLimit = props.getValue() as Option[]; return ( {React.Children.map(children, (child) => ( @@ -60,7 +60,7 @@ const SingleSelect: React.FC = ({ ? child : null )} - {placeholder}: {selectedValue[0]?.label ?? ''} + Limit: {selectedLimit[0]?.label ?? ''} ); }; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less deleted file mode 100644 index 798287366c3..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less +++ /dev/null @@ -1,45 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -@progress-gray: #d0d0d0; -@progress-light-blue: rgb(230, 235, 248); -@progress-blue: #1890ff; -@progress-green: #52c41a; -@progress-red: #FFA39E; - -.storage-cell-container-v2 { - .capacity-bar-v2 { - font-size: 1em; - } -} - -.ozone-used-bg-v2 { - color: @progress-green !important; -} - -.non-ozone-used-bg-v2 { - color: @progress-blue !important; -} - -.remaining-bg-v2 { - color: @progress-light-blue !important; -} - -.committed-bg-v2 { - color: @progress-red !important; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx index fd6dd8dfe9b..591b0088b04 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx @@ -20,73 +20,72 @@ import React from 'react'; import { Progress } from 'antd'; import filesize from 'filesize'; import Icon from '@ant-design/icons'; +import { withRouter } from 'react-router-dom'; import Tooltip from 'antd/lib/tooltip'; import { FilledIcon } from '@/utils/themeIcons'; import { getCapacityPercent } from '@/utils/common'; import type { StorageReport } from '@/v2/types/overview.types'; -import './storageBar.less'; - const size = filesize.partial({ standard: 'iec', round: 1 }); type StorageReportProps = { - showMeta?: boolean; - strokeWidth?: number; + showMeta: boolean; } & StorageReport -const StorageBar: React.FC = ({ - capacity = 0, - used = 0, - remaining = 0, - committed = 0, - showMeta = false, - strokeWidth = 3 +const StorageBar = (props: StorageReportProps = { + capacity: 0, + used: 0, + remaining: 0, + committed: 0, + showMeta: true, }) => { + const { capacity, used, remaining, committed, showMeta } = props; const nonOzoneUsed = capacity - remaining - used; const totalUsed = capacity - remaining; const tooltip = ( <>
    - + Ozone Used ({size(used)})
    - + Non Ozone Used ({size(nonOzoneUsed)})
    - + Remaining ({size(remaining)})
    - + Container Pre-allocated ({size(committed)})
    ); + const metaElement = (showMeta) ? ( +
    + {size(used + nonOzoneUsed)} / {size(capacity)} +
    + ) : <>; + return ( - - {(showMeta) && -
    - {size(used + nonOzoneUsed)} / {size(capacity)} -
    - } +
    + + {metaElement} + className='capacity-bar' strokeWidth={3} /> +
    ); } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx deleted file mode 100644 index b26ae251f95..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -import moment from 'moment'; -import Table, { - ColumnProps, - ColumnsType, - TablePaginationConfig -} from 'antd/es/table'; -import Tag from 'antd/es/tag'; -import { - CheckCircleOutlined, - CloseCircleOutlined, - CloudServerOutlined, - FileUnknownOutlined, - HddOutlined, - LaptopOutlined, - SaveOutlined -} from '@ant-design/icons'; - -import QuotaBar from '@/components/quotaBar/quotaBar'; -import { nullAwareLocaleCompare } from '@/utils/common'; -import { - Bucket, - BucketLayout, - BucketLayoutTypeList, - BucketsTableProps, - BucketStorage, - BucketStorageTypeList -} from '@/v2/types/bucket.types'; - -function renderIsVersionEnabled(isVersionEnabled: boolean) { - return isVersionEnabled - ? - : -}; - -function renderStorageType(bucketStorage: BucketStorage) { - const bucketStorageIconMap: Record = { - RAM_DISK: , - SSD: , - DISK: , - ARCHIVE: - }; - const icon = bucketStorage in bucketStorageIconMap - ? bucketStorageIconMap[bucketStorage] - : ; - return {icon} {bucketStorage}; -}; - -function renderBucketLayout(bucketLayout: BucketLayout) { - const bucketLayoutColorMap = { - FILE_SYSTEM_OPTIMIZED: 'green', - OBJECT_STORE: 'orange', - LEGACY: 'blue' - }; - const color = bucketLayout in bucketLayoutColorMap ? 
- bucketLayoutColorMap[bucketLayout] : ''; - return {bucketLayout}; -}; - -export const COLUMNS: ColumnsType = [ - { - title: 'Bucket', - dataIndex: 'name', - key: 'name', - sorter: (a: Bucket, b: Bucket) => a.name.localeCompare(b.name), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Volume', - dataIndex: 'volumeName', - key: 'volumeName', - sorter: (a: Bucket, b: Bucket) => a.volumeName.localeCompare(b.volumeName), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Owner', - dataIndex: 'owner', - key: 'owner', - sorter: (a: Bucket, b: Bucket) => nullAwareLocaleCompare(a.owner, b.owner) - }, - { - title: 'Versioning', - dataIndex: 'versioning', - key: 'isVersionEnabled', - render: (isVersionEnabled: boolean) => renderIsVersionEnabled(isVersionEnabled) - }, - { - title: 'Storage Type', - dataIndex: 'storageType', - key: 'storageType', - filterMultiple: true, - filters: BucketStorageTypeList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Bucket) => record.storageType === value, - sorter: (a: Bucket, b: Bucket) => a.storageType.localeCompare(b.storageType), - render: (storageType: BucketStorage) => renderStorageType(storageType) - }, - { - title: 'Bucket Layout', - dataIndex: 'bucketLayout', - key: 'bucketLayout', - filterMultiple: true, - filters: BucketLayoutTypeList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Bucket) => record.bucketLayout === value, - sorter: (a: Bucket, b: Bucket) => a.bucketLayout.localeCompare(b.bucketLayout), - render: (bucketLayout: BucketLayout) => renderBucketLayout(bucketLayout) - }, - { - title: 'Creation Time', - dataIndex: 'creationTime', - key: 'creationTime', - sorter: (a: Bucket, b: Bucket) => a.creationTime - b.creationTime, - render: (creationTime: number) => { - return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Modification Time', - dataIndex: 'modificationTime', - key: 'modificationTime', - sorter: (a: Bucket, b: Bucket) => a.modificationTime - b.modificationTime, - render: (modificationTime: number) => { - return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Storage Capacity', - key: 'quotaCapacityBytes', - sorter: (a: Bucket, b: Bucket) => a.usedBytes - b.usedBytes, - render: (text: string, record: Bucket) => ( - - ) - }, - { - title: 'Namespace Capacity', - key: 'namespaceCapacity', - sorter: (a: Bucket, b: Bucket) => a.usedNamespace - b.usedNamespace, - render: (text: string, record: Bucket) => ( - - ) - }, - { - title: 'Source Volume', - dataIndex: 'sourceVolume', - key: 'sourceVolume', - render: (sourceVolume: string) => { - return sourceVolume ? sourceVolume : 'NA'; - } - }, - { - title: 'Source Bucket', - dataIndex: 'sourceBucket', - key: 'sourceBucket', - render: (sourceBucket: string) => { - return sourceBucket ? 
sourceBucket : 'NA'; - } - } -]; - -const BucketsTable: React.FC = ({ - loading = false, - data, - handleAclClick, - selectedColumns, - searchColumn = 'name', - searchTerm = '' -}) => { - - React.useEffect(() => { - const aclColumn: ColumnProps = { - title: 'ACLs', - dataIndex: 'acls', - key: 'acls', - render: (_: any, record: Bucket) => { - return ( - { - handleAclClick(record); - }} - > - Show ACL - - ); - } - }; - - if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'acls') { - // Push the ACL column for initial load - COLUMNS.push(aclColumn); - selectedColumns.push({ - label: aclColumn.title as string, - value: aclColumn.key as string - }); - } else { - // Replace old ACL column with new ACL column with correct reference - // e.g. After page is reloaded / redirect from other page - COLUMNS[COLUMNS.length - 1] = aclColumn; - selectedColumns[selectedColumns.length - 1] = { - label: aclColumn.title as string, - value: aclColumn.key as string - } - } - }, []); - - function filterSelectedColumns() { - const columnKeys = selectedColumns.map((column) => column.value); - return COLUMNS.filter( - (column) => columnKeys.indexOf(column.key as string) >= 0 - ) - } - - function getFilteredData(data: Bucket[]) { - return data.filter( - (bucket: Bucket) => bucket[searchColumn].includes(searchTerm) - ); - } - - const paginationConfig: TablePaginationConfig = { - showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} buckets`, - showSizeChanger: true - }; - - return ( -
    - - - ) -} - -export default BucketsTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx deleted file mode 100644 index 494d898509b..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; -import moment from 'moment'; -import { Popover, Tooltip } from 'antd' -import { - CheckCircleFilled, - CloseCircleFilled, - HourglassFilled, - InfoCircleOutlined, - WarningFilled -} from '@ant-design/icons'; -import Table, { - ColumnsType, - TablePaginationConfig -} from 'antd/es/table'; -import { TableRowSelection } from 'antd/es/table/interface'; - -import StorageBar from '@/v2/components/storageBar/storageBar'; -import DecommissionSummary from '@/v2/components/decommissioningSummary/decommissioningSummary'; - -import { ReplicationIcon } from '@/utils/themeIcons'; -import { getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; - -import { - Datanode, - DatanodeOpState, - DatanodeOpStateList, - DatanodeState, - DatanodeStateList, - DatanodeTableProps -} from '@/v2/types/datanode.types'; -import { Pipeline } from '@/v2/types/pipelines.types'; - - -let decommissioningUuids: string | string[] = []; - -const headerIconStyles: React.CSSProperties = { - display: 'flex', - alignItems: 'center' -} - -const renderDatanodeState = (state: DatanodeState) => { - const stateIconMap = { - HEALTHY: , - STALE: , - DEAD: - }; - const icon = state in stateIconMap ? stateIconMap[state] : ''; - return {icon} {state}; -}; - -const renderDatanodeOpState = (opState: DatanodeOpState) => { - const opStateIconMap = { - IN_SERVICE: , - DECOMMISSIONING: , - DECOMMISSIONED: , - ENTERING_MAINTENANCE: , - IN_MAINTENANCE: - }; - const icon = opState in opStateIconMap ? 
opStateIconMap[opState] : ''; - return {icon} {opState}; -}; - -export const COLUMNS: ColumnsType = [ - { - title: 'Hostname', - dataIndex: 'hostname', - key: 'hostname', - sorter: (a: Datanode, b: Datanode) => a.hostname.localeCompare( - b.hostname, undefined, { numeric: true } - ), - defaultSortOrder: 'ascend' as const - }, - { - title: 'State', - dataIndex: 'state', - key: 'state', - filterMultiple: true, - filters: DatanodeStateList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Datanode) => record.state === value, - render: (text: DatanodeState) => renderDatanodeState(text), - sorter: (a: Datanode, b: Datanode) => a.state.localeCompare(b.state) - }, - { - title: 'Operational State', - dataIndex: 'opState', - key: 'opState', - filterMultiple: true, - filters: DatanodeOpStateList.map(state => ({ text: state, value: state })), - onFilter: (value, record: Datanode) => record.opState === value, - render: (text: DatanodeOpState) => renderDatanodeOpState(text), - sorter: (a: Datanode, b: Datanode) => a.opState.localeCompare(b.opState) - }, - { - title: 'UUID', - dataIndex: 'uuid', - key: 'uuid', - sorter: (a: Datanode, b: Datanode) => a.uuid.localeCompare(b.uuid), - defaultSortOrder: 'ascend' as const, - render: (uuid: string, record: Datanode) => { - return ( - //1. Compare Decommission Api's UUID with all UUID in table and show Decommission Summary - (decommissioningUuids && decommissioningUuids.includes(record.uuid) && record.opState !== 'DECOMMISSIONED') ? - : {uuid} - ); - } - }, - { - title: 'Storage Capacity', - dataIndex: 'storageUsed', - key: 'storageUsed', - sorter: (a: Datanode, b: Datanode) => a.storageRemaining - b.storageRemaining, - render: (_: string, record: Datanode) => ( - - ) - }, - { - title: 'Last Heartbeat', - dataIndex: 'lastHeartbeat', - key: 'lastHeartbeat', - sorter: (a: Datanode, b: Datanode) => moment(a.lastHeartbeat).unix() - moment(b.lastHeartbeat).unix(), - render: (heartbeat: number) => { - return heartbeat > 0 ? getTimeDiffFromTimestamp(heartbeat) : 'NA'; - } - }, - { - title: 'Pipeline ID(s)', - dataIndex: 'pipelines', - key: 'pipelines', - render: (pipelines: Pipeline[], record: Datanode) => { - const renderPipelineIds = (pipelineIds: Pipeline[]) => { - return pipelineIds?.map((pipeline: any, index: any) => ( -
    - - {pipeline.pipelineID} -
    - )) - } - - return ( - - {pipelines.length} pipelines - - ); - } - }, - { - title: () => ( - - Leader Count - - - - - ), - dataIndex: 'leaderCount', - key: 'leaderCount', - sorter: (a: Datanode, b: Datanode) => a.leaderCount - b.leaderCount - }, - { - title: 'Containers', - dataIndex: 'containers', - key: 'containers', - sorter: (a: Datanode, b: Datanode) => a.containers - b.containers - }, - { - title: () => ( - - Open Container - - - - - ), - dataIndex: 'openContainers', - key: 'openContainers', - sorter: (a: Datanode, b: Datanode) => a.openContainers - b.openContainers - }, - { - title: 'Version', - dataIndex: 'version', - key: 'version', - sorter: (a: Datanode, b: Datanode) => a.version.localeCompare(b.version), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Setup Time', - dataIndex: 'setupTime', - key: 'setupTime', - sorter: (a: Datanode, b: Datanode) => a.setupTime - b.setupTime, - render: (uptime: number) => { - return uptime > 0 ? moment(uptime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Revision', - dataIndex: 'revision', - key: 'revision', - sorter: (a: Datanode, b: Datanode) => a.revision.localeCompare(b.revision), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Build Date', - dataIndex: 'buildDate', - key: 'buildDate', - sorter: (a: Datanode, b: Datanode) => a.buildDate.localeCompare(b.buildDate), - defaultSortOrder: 'ascend' as const - }, - { - title: 'Network Location', - dataIndex: 'networkLocation', - key: 'networkLocation', - sorter: (a: Datanode, b: Datanode) => a.networkLocation.localeCompare(b.networkLocation), - defaultSortOrder: 'ascend' as const - } -]; - -const DatanodesTable: React.FC = ({ - data, - handleSelectionChange, - decommissionUuids, - selectedColumns, - loading = false, - selectedRows = [], - searchColumn = 'hostname', - searchTerm = '' -}) => { - - function filterSelectedColumns() { - const columnKeys = selectedColumns.map((column) => column.value); - return COLUMNS.filter( - (column) => columnKeys.indexOf(column.key as string) >= 0 - ); - } - - function getFilteredData(data: Datanode[]) { - return data?.filter( - (datanode: Datanode) => datanode[searchColumn].includes(searchTerm) - ) ?? []; - } - - function isSelectable(record: Datanode) { - // Disable checkbox for any datanode which is not DEAD to prevent removal - return record.state !== 'DEAD' && true; - } - - const paginationConfig: TablePaginationConfig = { - showTotal: (total: number, range) => ( - `${range[0]}-${range[1]} of ${total} Datanodes` - ), - showSizeChanger: true - }; - - const rowSelection: TableRowSelection = { - selectedRowKeys: selectedRows, - onChange: (rows: React.Key[]) => { handleSelectionChange(rows) }, - getCheckboxProps: (record: Datanode) => ({ - disabled: isSelectable(record) - }), - }; - - React.useEffect(() => { - decommissioningUuids = decommissionUuids; - }, [decommissionUuids]) - - return ( -
    -
    - - ); -} - -export default DatanodesTable; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx deleted file mode 100644 index 6c07749436d..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -import Table, { - ColumnsType, - TablePaginationConfig -} from 'antd/es/table'; -import Tooltip from 'antd/es/tooltip'; -import { InfoCircleOutlined } from '@ant-design/icons'; - -import { ReplicationIcon } from '@/utils/themeIcons'; -import { getDurationFromTimestamp, getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; -import { Pipeline, PipelinesTableProps, PipelineStatusList } from '@/v2/types/pipelines.types'; - - -// TODO: When Datanodes PR gets merged remove these declarations -// And import from datanodes.types - -type SummaryDatanodeDetails = { - level: number; - parent: unknown | null; - cost: number; - uuid: string; - uuidString: string; - ipAddress: string; - hostName: string; - ports: { - name: string; - value: number - }[]; - certSerialId: null, - version: string | null; - setupTime: number; - revision: string | null; - buildDate: string; - persistedOpState: string; - persistedOpStateExpiryEpochSec: number; - initialVersion: number; - currentVersion: number; - signature: number; - decommissioned: boolean; - networkName: string; - networkLocation: string; - networkFullPath: string; - numOfLeaves: number; -} - -export const COLUMNS: ColumnsType = [ - { - title: 'Pipeline ID', - dataIndex: 'pipelineId', - key: 'pipelineId', - sorter: (a: Pipeline, b: Pipeline) => a.pipelineId.localeCompare(b.pipelineId), - - }, - { - title: 'Replication Type & Factor', - dataIndex: 'replicationType', - key: 'replicationType', - render: (replicationType: string, record: Pipeline) => { - const replicationFactor = record.replicationFactor; - return ( - - - {replicationType} ({replicationFactor}) - - ); - }, - sorter: (a: Pipeline, b: Pipeline) => - (a.replicationType + a.replicationFactor.toString()).localeCompare(b.replicationType + b.replicationFactor.toString()), - defaultSortOrder: 'descend' as const - }, - { - title: 'Status', - dataIndex: 'status', - key: 'status', - filterMultiple: true, - filters: PipelineStatusList.map(status => ({ text: status, value: status })), - onFilter: (value, record: Pipeline) => record.status === value, - sorter: (a: Pipeline, b: Pipeline) => a.status.localeCompare(b.status) - }, - { - title: 'Containers', - dataIndex: 'containers', - key: 
'containers', - sorter: (a: Pipeline, b: Pipeline) => a.containers - b.containers - }, - { - title: 'Datanodes', - dataIndex: 'datanodes', - key: 'datanodes', - render: (datanodes: SummaryDatanodeDetails[]) => ( -
    - {datanodes.map(datanode => ( -
    - triggerNode}> - {datanode?.hostName ?? 'N/A'} - -
    - ))} -
    - ) - }, - { - title: 'Leader', - dataIndex: 'leaderNode', - key: 'leaderNode', - sorter: (a: Pipeline, b: Pipeline) => a.leaderNode.localeCompare(b.leaderNode) - }, - { - title: () => ( - - Last Leader Election  - - - - - ), - dataIndex: 'lastLeaderElection', - key: 'lastLeaderElection', - render: (lastLeaderElection: number) => lastLeaderElection > 0 ? - getTimeDiffFromTimestamp(lastLeaderElection) : 'NA', - sorter: (a: Pipeline, b: Pipeline) => a.lastLeaderElection - b.lastLeaderElection - }, - { - title: 'Lifetime', - dataIndex: 'duration', - key: 'duration', - render: (duration: number) => getDurationFromTimestamp(duration), - sorter: (a: Pipeline, b: Pipeline) => a.duration - b.duration - }, - { - title: () => ( - - No. of Elections  - - - - - ), - dataIndex: 'leaderElections', - key: 'leaderElections', - render: (leaderElections: number) => leaderElections > 0 ? - leaderElections : 'NA', - sorter: (a: Pipeline, b: Pipeline) => a.leaderElections - b.leaderElections - } -]; - -const PipelinesTable: React.FC = ({ - loading = false, - data, - selectedColumns, - searchTerm = '' -}) => { - const paginationConfig: TablePaginationConfig = { - showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} pipelines`, - showSizeChanger: true, - }; - - function filterSelectedColumns() { - const columnKeys = selectedColumns.map((column) => column.value); - return COLUMNS.filter( - (column) => columnKeys.indexOf(column.key as string) >= 0 - ) - } - - function getFilteredData(data: Pipeline[]) { - return data.filter( - (pipeline: Pipeline) => pipeline['pipelineId'].includes(searchTerm) - ) - } - - return ( -
    -
    - - ) -} - -export default PipelinesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx deleted file mode 100644 index 4de0d713fce..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import QuotaBar from '@/components/quotaBar/quotaBar'; -import { byteToSize } from '@/utils/common'; -import { Volume, VolumesTableProps } from '@/v2/types/volume.types'; -import Table, { ColumnsType, ColumnType, TablePaginationConfig } from 'antd/es/table'; -import moment from 'moment'; -import React from 'react'; -import { Link } from 'react-router-dom'; - -export const COLUMNS: ColumnsType = [ - { - title: 'Volume', - dataIndex: 'volume', - key: 'volume', - sorter: (a: Volume, b: Volume) => a.volume.localeCompare(b.volume), - defaultSortOrder: 'ascend' as const, - width: '15%' - }, - { - title: 'Owner', - dataIndex: 'owner', - key: 'owner', - sorter: (a: Volume, b: Volume) => a.owner.localeCompare(b.owner) - }, - { - title: 'Admin', - dataIndex: 'admin', - key: 'admin', - sorter: (a: Volume, b: Volume) => a.admin.localeCompare(b.admin) - }, - { - title: 'Creation Time', - dataIndex: 'creationTime', - key: 'creationTime', - sorter: (a: Volume, b: Volume) => a.creationTime - b.creationTime, - render: (creationTime: number) => { - return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Modification Time', - dataIndex: 'modificationTime', - key: 'modificationTime', - sorter: (a: Volume, b: Volume) => a.modificationTime - b.modificationTime, - render: (modificationTime: number) => { - return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Quota (Size)', - dataIndex: 'quotaInBytes', - key: 'quotaInBytes', - render: (quotaInBytes: number) => { - return quotaInBytes && quotaInBytes !== -1 ? 
byteToSize(quotaInBytes, 3) : 'NA'; - } - }, - { - title: 'Namespace Capacity', - key: 'namespaceCapacity', - sorter: (a: Volume, b: Volume) => a.usedNamespace - b.usedNamespace, - render: (text: string, record: Volume) => ( - - ) - }, -]; - -const VolumesTable: React.FC = ({ - loading = false, - data, - handleAclClick, - selectedColumns, - searchColumn = 'volume', - searchTerm = '' -}) => { - - React.useEffect(() => { - // On table mount add the actions column - console.log("Adding new column"); - const actionsColumn: ColumnType = { - title: 'Actions', - key: 'actions', - render: (_: any, record: Volume) => { - const searchParams = new URLSearchParams(); - searchParams.append('volume', record.volume); - - return ( - <> - - Show buckets - - handleAclClick(record)}> - Show ACL - - - ); - } - } - - if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'actions') { - // Push the ACL column for initial - COLUMNS.push(actionsColumn); - selectedColumns.push({ - label: actionsColumn.title as string, - value: actionsColumn.key as string - }); - } else { - // Replace old ACL column with new ACL column with correct reference - // e.g. After page is reloaded / redirect from other page - COLUMNS[COLUMNS.length - 1] = actionsColumn; - selectedColumns[selectedColumns.length - 1] = { - label: actionsColumn.title as string, - value: actionsColumn.key as string - } - } - - }, []); - - function filterSelectedColumns() { - const columnKeys = selectedColumns.map((column) => column.value); - return COLUMNS.filter( - (column) => columnKeys.indexOf(column.key as string) >= 0 - ) - } - - function getFilteredData(data: Volume[]) { - return data.filter( - (volume: Volume) => volume[searchColumn].includes(searchTerm) - ); - } - - const paginationConfig: TablePaginationConfig = { - showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} volumes`, - showSizeChanger: true - }; - - return ( -
    -
    - - ) -} - -export default VolumesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less deleted file mode 100644 index 8f4c8ffaf9f..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less +++ /dev/null @@ -1,41 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -.content-div { - min-height: unset; - - .table-header-section { - display: flex; - justify-content: space-between; - align-items: center; - - .table-filter-section { - font-size: 14px; - font-weight: normal; - display: flex; - column-gap: 8px; - padding: 16px 8px; - } - } - - .tag-block { - display: flex; - column-gap: 8px; - padding: 0px 8px 16px 8px; - } -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx deleted file mode 100644 index 1e2de307b17..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import React, { useEffect, useRef, useState } from 'react'; -import moment from 'moment'; -import { ValueType } from 'react-select'; -import { useLocation } from 'react-router-dom'; - -import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; -import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; -import Search from '@/v2/components/search/search'; -import MultiSelect from '@/v2/components/select/multiSelect'; -import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; - -import { AutoReloadHelper } from '@/utils/autoReloadHelper'; -import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; -import { showDataFetchError } from '@/utils/common'; -import { useDebounce } from '@/v2/hooks/debounce.hook'; - -import { - Bucket, - BucketResponse, - BucketsState, -} from '@/v2/types/bucket.types'; - -import './buckets.less'; -import BucketsTable, { COLUMNS } from '@/v2/components/tables/bucketsTable'; - - -const LIMIT_OPTIONS: Option[] = [ - { - label: '1000', - value: '1000' - }, - { - label: '5000', - value: '5000' - }, - { - label: '10000', - value: '10000' - }, - { - label: '20000', - value: '20000' - } -] - -const SearchableColumnOpts = [{ - label: 'Bucket', - value: 'name' -}, { - label: 'Volume', - value: 'volumeName' -}] - -const defaultColumns = COLUMNS.map(column => ({ - label: column.title as string, - value: column.key as string -})); - -function getVolumeBucketMap(data: Bucket[]) { - const volumeBucketMap = data.reduce(( - map: Map>, - currentBucket - ) => { - const volume = currentBucket.volumeName; - if (map.has(volume)) { - const buckets = Array.from(map.get(volume)!); - map.set(volume, new Set([...buckets, currentBucket])); - } else { - map.set(volume, new Set().add(currentBucket)); - } - return map; - }, new Map>()); - return volumeBucketMap; -} - -function getFilteredBuckets( - selectedVolumes: Option[], - bucketsMap: Map> -) { - let selectedBuckets: Bucket[] = []; - selectedVolumes.forEach(selectedVolume => { - if (bucketsMap.has(selectedVolume.value) - && bucketsMap.get(selectedVolume.value)) { - selectedBuckets = [ - ...selectedBuckets, - ...Array.from(bucketsMap.get(selectedVolume.value)!) - ]; - } - }); - - return selectedBuckets; -} - -const Buckets: React.FC<{}> = () => { - - const cancelSignal = useRef(); - - const [state, setState] = useState({ - totalCount: 0, - lastUpdated: 0, - columnOptions: defaultColumns, - volumeBucketMap: new Map>(), - bucketsUnderVolume: [], - volumeOptions: [], - }); - const [loading, setLoading] = useState(false); - const [selectedColumns, setSelectedColumns] = useState(defaultColumns); - const [selectedVolumes, setSelectedVolumes] = useState([]); - const [selectedLimit, setSelectedLimit] = useState
    + import('@/v2/pages/overview/overview')); const Volumes = lazy(() => import('@/v2/pages/volumes/volumes')) -const Buckets = lazy(() => import('@/v2/pages/buckets/buckets')); -const Datanodes = lazy(() => import('@/v2/pages/datanodes/datanodes')); -const Pipelines = lazy(() => import('@/v2/pages/pipelines/pipelines')); -const DiskUsage = lazy(() => import('@/v2/pages/diskUsage/diskUsage')); export const routesV2 = [ { @@ -32,21 +28,5 @@ export const routesV2 = [ { path: '/Volumes', component: Volumes - }, - { - path: '/Buckets', - component: Buckets - }, - { - path: '/Datanodes', - component: Datanodes - }, - { - path: '/Pipelines', - component: Pipelines - }, - { - path: '/DiskUsage', - component: DiskUsage } ]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts index eb499dc617e..8b2fd0c694c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts @@ -17,7 +17,6 @@ */ import { Acl } from "@/v2/types/acl.types"; -import { Option as MultiOption } from "@/v2/components/select/multiSelect"; // Corresponds to OzoneManagerProtocolProtos.StorageTypeProto export const BucketStorageTypeList = [ @@ -39,8 +38,8 @@ export type BucketLayout = typeof BucketLayoutTypeList[number]; export type Bucket = { volumeName: string; - name: string; - versioning: boolean; + bucketName: string; + isVersionEnabled: boolean; storageType: BucketStorage; creationTime: number; modificationTime: number; @@ -54,26 +53,3 @@ export type Bucket = { acls?: Acl[]; bucketLayout: BucketLayout; } - -export type BucketResponse = { - totalCount: number; - buckets: Bucket[]; -} - -export type BucketsState = { - totalCount: number; - lastUpdated: number; - columnOptions: MultiOption[]; - volumeBucketMap: Map>; - bucketsUnderVolume: Bucket[]; - volumeOptions: MultiOption[]; -} - -export type BucketsTableProps = { - loading: boolean; - data: Bucket[]; - handleAclClick: (arg0: Bucket) => void; - selectedColumns: MultiOption[]; - searchColumn: 'name' | 'volumeName'; - searchTerm: string; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts deleted file mode 100644 index 96a37020153..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { Pipeline } from "@/v2/types/pipelines.types"; -import { StorageReport } from "@/v2/types/overview.types"; -import { Option as MultiOption } from "@/v2/components/select/multiSelect"; - -// Corresponds to HddsProtos.NodeState -export const DatanodeStateList = ['HEALTHY', 'STALE', 'DEAD'] as const; -type DatanodeStateType = typeof DatanodeStateList; -export type DatanodeState = DatanodeStateType[number]; - -// Corresponds to HddsProtos.NodeOperationalState -export const DatanodeOpStateList = [ - 'IN_SERVICE', - 'DECOMMISSIONING', - 'DECOMMISSIONED', - 'ENTERING_MAINTENANCE', - 'IN_MAINTENANCE' -] as const; -export type DatanodeOpState = typeof DatanodeOpStateList[number]; - -export type DatanodeResponse = { - hostname: string; - state: DatanodeState; - opState: DatanodeOpState; - lastHeartbeat: string; - storageReport: StorageReport; - pipelines: Pipeline[]; - containers: number; - openContainers: number; - leaderCount: number; - uuid: string; - version: string; - setupTime: number; - revision: string; - buildDate: string; - networkLocation: string; -} - -export type DatanodesResponse = { - totalCount: number; - datanodes: DatanodeResponse[]; -} - -export type Datanode = { - hostname: string; - state: DatanodeState; - opState: DatanodeOpState; - lastHeartbeat: string; - storageUsed: number; - storageTotal: number; - storageRemaining: number; - storageCommitted: number; - pipelines: Pipeline[]; - containers: number; - openContainers: number; - leaderCount: number; - uuid: string; - version: string; - setupTime: number; - revision: string; - buildDate: string; - networkLocation: string; -} - -export type DatanodeDetails = { - uuid: string; -} - -export type DatanodeDecomissionInfo = { - datanodeDetails: DatanodeDetails -} - -export type DatanodesState = { - dataSource: Datanode[]; - lastUpdated: number; - columnOptions: MultiOption[]; -} - -// Datanode Summary endpoint types -type summaryByteString = { - string: string; - bytes: { - validUtf8: boolean; - empty: boolean; - } -} - -type SummaryPort = { - name: string; - value: number; -} - -type SummaryDatanodeDetails = { - level: number; - parent: unknown | null; - cost: number; - uuid: string; - uuidString: string; - ipAddress: string; - hostName: string; - ports: SummaryPort; - certSerialId: null, - version: string | null; - setupTime: number; - revision: string | null; - buildDate: string; - persistedOpState: string; - persistedOpStateExpiryEpochSec: number; - initialVersion: number; - currentVersion: number; - decommissioned: boolean; - maintenance: boolean; - ipAddressAsByteString: summaryByteString; - hostNameAsByteString: summaryByteString; - networkName: string; - networkLocation: string; - networkFullPath: string; - numOfLeaves: number; - networkNameAsByteString: summaryByteString; - networkLocationAsByteString: summaryByteString -} - -type SummaryMetrics = { - decommissionStartTime: string; - numOfUnclosedPipelines: number; - numOfUnderReplicatedContainers: number; - numOfUnclosedContainers: number; -} - -type SummaryContainers = { - UnderReplicated: string[]; - UnClosed: string[]; -} - -export type SummaryData = { - datanodeDetails: SummaryDatanodeDetails; - metrics: SummaryMetrics; - containers: SummaryContainers; -} - -export type DatanodeTableProps = { - loading: boolean; - selectedRows: React.Key[]; - data: Datanode[]; - decommissionUuids: string | string[]; - searchColumn: 'hostname' | 'uuid' | 'version' | 'revision'; - searchTerm: string; - selectedColumns: MultiOption[]; - handleSelectionChange: (arg0: 
React.Key[]) => void; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts deleted file mode 100644 index e649c143aec..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -export type DUSubpath = { - path: string; - size: number; - sizeWithReplica: number; - isKey: boolean; -} - -export type DUResponse = { - status: string; - path: string; - subPathCount: number; - size: number; - sizeWithReplica: number; - subPaths: DUSubpath[]; - sizeDirectKey: number; -} - -export type PlotData = { - value: number; - name: string; - size: string; - percentage: string; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts deleted file mode 100644 index 7c5a23bc0af..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { Option } from "@/v2/components/select/multiSelect"; - -export const PipelineStatusList = [ - 'OPEN', - 'CLOSING', - 'QUASI_CLOSED', - 'CLOSED', - 'UNHEALTHY', - 'INVALID', - 'DELETED', - 'DORMANT' -] as const; -export type PipelineStatus = typeof PipelineStatusList[number]; - -export type Pipeline = { - pipelineId: string; - status: PipelineStatus; - replicationType: string; - leaderNode: string; - datanodes: string[]; - lastLeaderElection: number; - duration: number; - leaderElections: number; - replicationFactor: string; - containers: number; -} - -export type PipelinesResponse = { - totalCount: number; - pipelines: Pipeline[]; -} - -export type PipelinesState = { - activeDataSource: Pipeline[]; - columnOptions: Option[]; - lastUpdated: number; -} - -export type PipelinesTableProps = { - loading: boolean; - data: Pipeline[]; - selectedColumns: Option[]; - searchTerm: string; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts index b808d403584..67f007706a4 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts @@ -40,13 +40,5 @@ export type VolumesState = { data: Volume[]; lastUpdated: number; columnOptions: Option[]; -} - -export type VolumesTableProps = { - loading: boolean; - data: Volume[]; - handleAclClick: (arg0: Volume) => void; - selectedColumns: Option[]; - searchColumn: 'volume' | 'owner' | 'admin'; - searchTerm: string; + currentRow: Volume | Record; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts deleted file mode 100644 index fb553d0db3f..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import moment from "moment"; - -moment.updateLocale('en', { - relativeTime: { - past: '%s ago', - s: '%ds', - m: '1min', - mm: '%dmins', - h: '1hr', - hh: '%dhrs', - d: '1d', - dd: '%dd', - M: '1m', - MM: '%dm', - y: '1y', - yy: '%dy' - } -}); - -export function getTimeDiffFromTimestamp(timestamp: number): string { - const timestampDate = new Date(timestamp); - return moment(timestampDate).fromNow(); -} - -export function getDurationFromTimestamp(timestamp: number): string { - const duration: moment.Duration = moment.duration(timestamp, 'milliseconds'); - // return nothing when the duration is falsy or not correctly parsed (P0D) - if(!duration || duration.toISOString() === "P0D") return ''; - - let elapsedTime = []; - const durationBreakdowns: Record = { - 'y': Math.floor(duration.years()), - 'm': Math.floor(duration.months()), - 'd': Math.floor(duration.days()), - 'h': Math.floor(duration.hours()), - 'min': Math.floor(duration.minutes()), - 's': Math.floor(duration.seconds()) - } - - for (const [key, value] of Object.entries(durationBreakdowns)) { - value > 0 && elapsedTime.push(value + key); - } - - return (elapsedTime.length === 0) ? 'Just now' : elapsedTime.join(' '); -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx index 63f095ff7ca..f273f758ea9 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx @@ -101,7 +101,9 @@ export class Insights extends React.Component, IInsightsS // Disable bucket selection dropdown if more than one volume is selected // If there is only one volume, bucket selection dropdown should not be disabled. const isBucketSelectionDisabled = !selectedVolumes || - (selectedVolumes?.length > 1 && volumeBucketMap.size !== 1); + (selectedVolumes && + (selectedVolumes.length > 1 && + (volumeBucketMap.size !== 1))); let bucketOptions: IOption[] = []; // When volume is changed and more than one volume is selected, // selected buckets value should be reset to all buckets @@ -453,7 +455,7 @@ export class Insights extends React.Component, IInsightsS
    {isLoading ? Loading... : - ((fileCountsResponse?.length > 0) ? + ((fileCountsResponse && fileCountsResponse.length > 0) ?
    @@ -504,7 +506,7 @@ export class Insights extends React.Component, IInsightsS
    {isLoading ? Loading... : - ((containerCountResponse?.length > 0) ? + ((containerCountResponse && containerCountResponse.length > 0) ?
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index fdd25929d03..b56e8d8151a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -530,7 +530,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { - const mismatchContainers: IContainerResponse[] = mismatchContainersResponse?.data?.containerDiscrepancyInfo && []; + const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; this.setState({ loading: false, @@ -567,7 +567,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller request.then(openKeysResponse => { - const openKeys = openKeysResponse?.data ?? {"fso": []}; + const openKeys = openKeysResponse && openKeysResponse.data; let allopenKeysResponse: any[] = []; for (let key in openKeys) { if (Array.isArray(openKeys[key])) { @@ -614,7 +614,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletePendingSignal = controller; request.then(deletePendingKeysResponse => { - const deletePendingKeys = deletePendingKeysResponse?.data?.deletedKeyInfo ?? []; + const deletePendingKeys = deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.deletedKeyInfo; //Use Summation Logic iterate through all object and find sum of all datasize let deletedKeyInfoData = []; deletedKeyInfoData = deletePendingKeys && deletePendingKeys.flatMap((infoObject: any) => { @@ -714,7 +714,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; - deletedContainerKeys = deletedKeysResponse?.data?.containers ?? []; + deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; this.setState({ loading: false, deletedContainerKeysDataSource: deletedContainerKeys @@ -748,7 +748,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedPendingDirSignal = controller request.then(deletePendingDirResponse => { let deletedDirInfo = []; - deletedDirInfo = deletePendingDirResponse?.data?.deletedDirInfo ?? 
[]; + deletedDirInfo = deletePendingDirResponse && deletePendingDirResponse.data && deletePendingDirResponse.data.deletedDirInfo; this.setState({ loading: false, pendingDeleteDirDataSource: deletedDirInfo diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts index 1a079c5efa4..ddb2832f39b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts @@ -21,7 +21,7 @@ import { defineConfig, splitVendorChunkPlugin } from 'vite'; import { resolve } from 'path'; -import react from '@vitejs/plugin-react-swc'; +import react from '@vitejs/plugin-react'; function pathResolve(dir: string) { return resolve(__dirname, '.', dir) @@ -29,12 +29,6 @@ function pathResolve(dir: string) { // https://vitejs.dev/config/ export default defineConfig({ - plugins: [ - react({ - devTarget: "es2015" //SWC by default bypasses the build target, set dev target explicitly - }), - splitVendorChunkPlugin() - ], build: { target: "es2015", outDir: 'build', @@ -54,6 +48,7 @@ export default defineConfig({ } } }, + plugins: [react(), splitVendorChunkPlugin()], server: { proxy: { "/api": { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 3c39e4192d2..82c7c1b5bef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -886,9 +886,7 @@ public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); - String negativeSize = UnHealthyContainerStates.NEGATIVE_SIZE.toString(); // For NEGATIVE_SIZE state - // Initial empty response verification Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -901,55 +899,44 @@ public void testUnhealthyContainersFilteredResponse() assertEquals(0, responseObject.getMisReplicatedCount()); assertEquals(Collections.EMPTY_LIST, responseObject.getContainers()); - // Add unhealthy records putContainerInfos(5); uuid1 = newDatanode("host1", "127.0.0.1"); uuid2 = newDatanode("host2", "127.0.0.2"); uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); - createEmptyMissingUnhealthyRecords(2); // For EMPTY_MISSING state - createNegativeSizeUnhealthyRecords(2); // For NEGATIVE_SIZE state + createEmptyMissingUnhealthyRecords(2); - // Check for unhealthy containers response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); responseObject = (UnhealthyContainersResponse) response.getEntity(); - // Summary should have the count for all unhealthy: assertEquals(5, responseObject.getMissingCount()); assertEquals(4, responseObject.getOverReplicatedCount()); assertEquals(3, responseObject.getUnderReplicatedCount()); assertEquals(2, responseObject.getMisReplicatedCount()); - Collection records = responseObject.getContainers(); + Collection records + = responseObject.getContainers(); assertTrue(records.stream() .flatMap(containerMetadata -> containerMetadata.getReplicas().stream() 
.map(ContainerHistory::getState)) .allMatch(s -> s.equals("UNHEALTHY"))); - - // Verify only missing containers are returned + // There should only be 5 missing containers and no others as we asked for + // only missing. assertEquals(5, records.size()); for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } - // Check for empty missing containers, should return zero Response filteredEmptyMissingResponse = containerEndpoint .getUnhealthyContainers(emptyMissing, 1000, 1); responseObject = (UnhealthyContainersResponse) filteredEmptyMissingResponse.getEntity(); records = responseObject.getContainers(); - assertEquals(0, records.size()); - - // Check for negative size containers, should return zero - Response filteredNegativeSizeResponse = containerEndpoint - .getUnhealthyContainers(negativeSize, 1000, 1); - responseObject = (UnhealthyContainersResponse) filteredNegativeSizeResponse.getEntity(); - records = responseObject.getContainers(); + // Assert for zero empty missing containers. assertEquals(0, records.size()); } - @Test public void testUnhealthyContainersInvalidState() { WebApplicationException e = assertThrows(WebApplicationException.class, @@ -1056,15 +1043,6 @@ private void createEmptyMissingUnhealthyRecords(int emptyMissing) { } } - private void createNegativeSizeUnhealthyRecords(int negativeSize) { - int cid = 0; - for (int i = 0; i < negativeSize; i++) { - createUnhealthyRecord(++cid, UnHealthyContainerStates.NEGATIVE_SIZE.toString(), - 3, 3, 0, null); // Added for NEGATIVE_SIZE state - } - } - - private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java index 7c874a9e299..e30590df55e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.recon.api.NodeEndpoint; import org.apache.hadoop.ozone.recon.api.PipelineEndpoint; import org.apache.hadoop.ozone.recon.api.TaskStatusService; +import org.apache.hadoop.ozone.recon.api.TriggerDBSyncEndpoint; import org.apache.hadoop.ozone.recon.api.UtilizationEndpoint; import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; @@ -69,14 +70,8 @@ public void testAdminOnlyEndpoints() { assertThat(allEndpoints).isNotEmpty(); - // If an endpoint is added, it must either require admin privileges by being - // marked with the `@AdminOnly` annotation, or be added to this set to exclude it. - // - Any endpoint that displays information related to the filesystem namespace - // (including aggregate counts), user information, or allows modification to the - // cluster's state should be marked as `@AdminOnly`. - // - Read-only endpoints that only return information about node status or - // cluster state do not require the `@AdminOnly` annotation and can be excluded - // from admin requirements by adding them to this set. + // If an endpoint is added, it must be explicitly added to this set or be + // marked with @AdminOnly for this test to pass. 
Set> nonAdminEndpoints = new HashSet<>(); nonAdminEndpoints.add(UtilizationEndpoint.class); nonAdminEndpoints.add(ClusterStateEndpoint.class); @@ -84,6 +79,7 @@ public void testAdminOnlyEndpoints() { nonAdminEndpoints.add(NodeEndpoint.class); nonAdminEndpoints.add(PipelineEndpoint.class); nonAdminEndpoints.add(TaskStatusService.class); + nonAdminEndpoints.add(TriggerDBSyncEndpoint.class); assertThat(allEndpoints).containsAll(nonAdminEndpoints); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 46e4506a5ef..8647639dd13 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -20,20 +20,14 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - import java.io.IOException; import java.time.Duration; import java.util.ArrayList; @@ -108,7 +102,7 @@ public void testRun() throws Exception { // Create 7 containers. The first 5 will have various unhealthy states // defined below. 
The container with ID=6 will be healthy and - // container with ID=7 will be EMPTY_MISSING (but not inserted into DB) + // container with ID=7 will be EMPTY_MISSING List mockContainers = getMockContainers(7); when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); when(scmMock.getContainerManager()).thenReturn(containerManagerMock); @@ -135,20 +129,20 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> EMPTY_MISSING (will not be inserted into DB) + // return 0 replicas for container ID 3 -> Empty Missing ContainerInfo containerInfo3 = TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); - // Return 5 Healthy Replicas -> Over-replicated + // Return 5 Healthy -> Over replicated ContainerInfo containerInfo4 = TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, - State.CLOSED, State.CLOSED, State.CLOSED)); + State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated ContainerInfo containerInfo5 = @@ -161,7 +155,7 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); - // Return 3 Healthy Replicas -> Healthy container + // Return 3 Healthy -> Healthy container ContainerInfo containerInfo6 = TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); @@ -169,14 +163,12 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> MISSING (will later transition to EMPTY_MISSING but not inserted into DB) + // return 0 replicas for container ID 7 -> MISSING ContainerInfo containerInfo7 = TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) .thenReturn(Collections.emptySet()); - when(reconContainerMetadataManager.getKeyCountForContainer( - 7L)).thenReturn(5L); // Indicates non-empty container 7 for now List all = unHealthyContainersTableHandle.findAll(); assertThat(all).isEmpty(); @@ -185,8 +177,8 @@ public void testRun() throws Exception { ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); - - // Start container health task + when(reconContainerMetadataManager.getKeyCountForContainer( + 7L)).thenReturn(5L); ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), scmMock.getScmServiceProvider(), @@ -194,12 +186,8 @@ public void testRun() throws Exception 
{ placementMock, reconTaskConfig, reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); - - // Ensure unhealthy container count in DB matches expected LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 5)); - - // Check for UNDER_REPLICATED container states + (unHealthyContainersTableHandle.count() == 6)); UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -209,20 +197,19 @@ public void testRun() throws Exception { assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); - // Assert that EMPTY_MISSING state containers were never added to DB. - assertEquals(0, - unHealthyContainersTableHandle.fetchByContainerId(3L).size()); - List unhealthyContainers = containerHealthSchemaManager.getUnhealthyContainers( - ALL_REPLICAS_BAD, 0, Integer.MAX_VALUE); + ALL_REPLICAS_UNHEALTHY, 0, Integer.MAX_VALUE); assertEquals(1, unhealthyContainers.size()); assertEquals(2L, unhealthyContainers.get(0).getContainerId().longValue()); assertEquals(0, unhealthyContainers.get(0).getActualReplicaCount().intValue()); - // Check for MISSING state in container ID 7 + rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); + assertEquals("EMPTY_MISSING", rec.getContainerState()); + assertEquals(3, rec.getReplicaDelta().intValue()); + rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); @@ -243,7 +230,9 @@ public void testRun() throws Exception { assertThat(taskStatus.getLastUpdatedTimestamp()) .isGreaterThan(currentTime); - // Adjust the mock results and rerun to check for updates or removal of records + // Now run the job again, to check that relevant records are updated or + // removed as appropriate. 
Need to adjust the return value for all the mocks + // Under replicated -> Delta goes from 2 to 1 when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED)); @@ -252,7 +241,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // Container 3 remains EMPTY_MISSING, but no DB insertion + // return 0 replicas for container ID 3 -> Still empty Missing when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -261,16 +250,11 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); - // Convert container 7 which was MISSING to EMPTY_MISSING (not inserted into DB) - when(reconContainerMetadataManager.getKeyCountForContainer( - 7L)).thenReturn(0L); - + // Was mis-replicated - make it healthy now placementMock.setMisRepWhenDnPresent(null); - // Ensure count is reduced after EMPTY_MISSING containers are not inserted LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 2)); - + (unHealthyContainersTableHandle.count() == 4)); rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(1, rec.getReplicaDelta().intValue()); @@ -279,21 +263,36 @@ public void testRun() throws Exception { assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(2L).size()); - // Assert that for container 7 no records exist in DB because it's now EMPTY_MISSING - assertEquals(0, - unHealthyContainersTableHandle.fetchByContainerId(7L).size()); + rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); + assertEquals("EMPTY_MISSING", rec.getContainerState()); + assertEquals(3, rec.getReplicaDelta().intValue()); + + rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); + assertEquals("MISSING", rec.getContainerState()); + assertEquals(3, rec.getReplicaDelta().intValue()); rec = unHealthyContainersTableHandle.fetchByContainerId(4L).get(0); assertEquals("OVER_REPLICATED", rec.getContainerState()); assertEquals(-1, rec.getReplicaDelta().intValue()); - // Ensure container 5 is now healthy and not in the table + // This container is now healthy, it should not be in the table any more assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); - // Just check once again that count remains consistent + // Again make container Id 7 as empty which was missing as well, so in next + // container health task run, this container also should be deleted from + // UNHEALTHY_CONTAINERS table because we want to cleanup any existing + // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. 
+ when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); + LambdaTestUtils.await(6000, 1000, () -> { + UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); + return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); + }); + + // Just check once again that count doesn't change, only state of + // container 7 changes from MISSING to EMPTY_MISSING LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 2)); + (unHealthyContainersTableHandle.count() == 4)); } @Test @@ -368,12 +367,17 @@ public void testDeletedContainer() throws Exception { reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); LambdaTestUtils.await(6000, 1000, () -> - (unHealthyContainersTableHandle.count() == 1)); + (unHealthyContainersTableHandle.count() == 2)); UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); + rec = + unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); + assertEquals("EMPTY_MISSING", rec.getContainerState()); + assertEquals(3, rec.getReplicaDelta().intValue()); + ReconTaskStatus taskStatus = reconTaskStatusDao.findById(containerHealthTask.getTaskName()); assertThat(taskStatus.getLastUpdatedTimestamp()) @@ -381,191 +385,64 @@ public void testDeletedContainer() throws Exception { } @Test - public void testAllContainerStateInsertions() { - UnhealthyContainersDao unHealthyContainersTableHandle = + public void testNegativeSizeContainers() throws Exception { + // Setup mock objects and test environment + UnhealthyContainersDao unhealthyContainersDao = getDao(UnhealthyContainersDao.class); - ContainerHealthSchemaManager containerHealthSchemaManager = new ContainerHealthSchemaManager( getSchemaDefinition(ContainerSchemaDefinition.class), - unHealthyContainersTableHandle); - - // Iterate through each state in the UnHealthyContainerStates enum - for (ContainerSchemaDefinition.UnHealthyContainerStates state : - ContainerSchemaDefinition.UnHealthyContainerStates.values()) { - - // Create a dummy UnhealthyContainer record with the current state - UnhealthyContainers unhealthyContainer = new UnhealthyContainers(); - unhealthyContainer.setContainerId(state.ordinal() + 1L); - - // Set replica counts based on the state - switch (state) { - case MISSING: - case EMPTY_MISSING: - unhealthyContainer.setExpectedReplicaCount(3); - unhealthyContainer.setActualReplicaCount(0); - unhealthyContainer.setReplicaDelta(3); - break; - - case UNDER_REPLICATED: - unhealthyContainer.setExpectedReplicaCount(3); - unhealthyContainer.setActualReplicaCount(1); - unhealthyContainer.setReplicaDelta(2); - break; - - case OVER_REPLICATED: - unhealthyContainer.setExpectedReplicaCount(3); - unhealthyContainer.setActualReplicaCount(4); - unhealthyContainer.setReplicaDelta(-1); - break; - - case MIS_REPLICATED: - case NEGATIVE_SIZE: - unhealthyContainer.setExpectedReplicaCount(3); - unhealthyContainer.setActualReplicaCount(3); - unhealthyContainer.setReplicaDelta(0); - break; - - case ALL_REPLICAS_BAD: - unhealthyContainer.setExpectedReplicaCount(3); - unhealthyContainer.setActualReplicaCount(0); - unhealthyContainer.setReplicaDelta(3); - break; - - default: - fail("Unhandled state: " + state.name() + ". 
Please add this state to the switch case."); - } - - unhealthyContainer.setContainerState(state.name()); - unhealthyContainer.setInStateSince(System.currentTimeMillis()); - - // Try inserting the record and catch any exception that occurs - Exception exception = null; - try { - containerHealthSchemaManager.insertUnhealthyContainerRecords( - Collections.singletonList(unhealthyContainer)); - } catch (Exception e) { - exception = e; - } - - // Assert no exception should be thrown for each state - assertNull(exception, - "Exception was thrown during insertion for state " + state.name() + - ": " + exception); - - // Optionally, verify the record was inserted correctly - List insertedRecords = - unHealthyContainersTableHandle.fetchByContainerId( - state.ordinal() + 1L); - assertFalse(insertedRecords.isEmpty(), - "Record was not inserted for state " + state.name() + "."); - assertEquals(insertedRecords.get(0).getContainerState(), state.name(), - "The inserted container state does not match for state " + - state.name() + "."); - } - } - - @Test - public void testMissingAndEmptyMissingContainerDeletion() throws Exception { - // Setup mock DAOs and managers - UnhealthyContainersDao unHealthyContainersTableHandle = - getDao(UnhealthyContainersDao.class); - ContainerHealthSchemaManager containerHealthSchemaManager = - new ContainerHealthSchemaManager( - getSchemaDefinition(ContainerSchemaDefinition.class), - unHealthyContainersTableHandle); + unhealthyContainersDao); ReconStorageContainerManagerFacade scmMock = mock(ReconStorageContainerManagerFacade.class); - MockPlacementPolicy placementMock = new MockPlacementPolicy(); ContainerManager containerManagerMock = mock(ContainerManager.class); StorageContainerServiceProvider scmClientMock = mock(StorageContainerServiceProvider.class); ReconContainerMetadataManager reconContainerMetadataManager = mock(ReconContainerMetadataManager.class); - mock(ReconContainerMetadataManager.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); - // Create 2 containers. They start in CLOSED state in Recon. 
- List mockContainers = getMockContainers(2); - when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + // Mock container info setup + List mockContainers = getMockContainers(3); when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); when(containerManagerMock.getContainers(any(ContainerID.class), anyInt())).thenReturn(mockContainers); - - // Mark both containers as initially CLOSED in Recon for (ContainerInfo c : mockContainers) { - when(containerManagerMock.getContainer(c.containerID())).thenReturn(c); + when(containerManagerMock.getContainer( + c.containerID())).thenReturn(c); + when(scmClientMock.getContainerWithPipeline( + c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); + when(containerManagerMock.getContainer(c.containerID()) + .getUsedBytes()).thenReturn(Long.valueOf(-10)); } - // Simulate SCM reporting the containers as DELETED - ContainerInfo deletedContainer1 = getMockDeletedContainer(1); - ContainerInfo deletedContainer2 = getMockDeletedContainer(2); - - when(scmClientMock.getContainerWithPipeline(1)) - .thenReturn(new ContainerWithPipeline(deletedContainer1, null)); - when(scmClientMock.getContainerWithPipeline(2)) - .thenReturn(new ContainerWithPipeline(deletedContainer2, null)); - - // Both containers start as CLOSED in Recon (MISSING or EMPTY_MISSING) - when(containerManagerMock.getContainer(ContainerID.valueOf(1L)).getState()) - .thenReturn(HddsProtos.LifeCycleState.CLOSED); - when(containerManagerMock.getContainer(ContainerID.valueOf(2L)).getState()) - .thenReturn(HddsProtos.LifeCycleState.CLOSED); - - // Replicas are empty, so both containers should be considered for deletion - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) - .thenReturn(Collections.emptySet()); - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) - .thenReturn(Collections.emptySet()); + // Verify the table is initially empty + assertThat(unhealthyContainersDao.findAll()).isEmpty(); - // Initialize UnhealthyContainers in DB (MISSING and EMPTY_MISSING) - // Create and set up the first UnhealthyContainer for a MISSING container - UnhealthyContainers container1 = new UnhealthyContainers(); - container1.setContainerId(1L); - container1.setContainerState("MISSING"); - container1.setExpectedReplicaCount(3); - container1.setActualReplicaCount(0); - container1.setReplicaDelta(3); - container1.setInStateSince(System.currentTimeMillis()); - - // Create and set up the second UnhealthyContainer for an EMPTY_MISSING container - UnhealthyContainers container2 = new UnhealthyContainers(); - container2.setContainerId(2L); - container2.setContainerState("MISSING"); - container2.setExpectedReplicaCount(3); - container2.setActualReplicaCount(0); - container2.setReplicaDelta(3); - container2.setInStateSince(System.currentTimeMillis()); - - unHealthyContainersTableHandle.insert(container1); - unHealthyContainersTableHandle.insert(container2); - - when(reconContainerMetadataManager.getKeyCountForContainer(1L)).thenReturn(5L); - when(reconContainerMetadataManager.getKeyCountForContainer(2L)).thenReturn(0L); - - // Start the container health task + // Setup and start the container health task ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); - ContainerHealthTask containerHealthTask = - new 
ContainerHealthTask(scmMock.getContainerManager(), - scmMock.getScmServiceProvider(), - reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, - reconContainerMetadataManager, new OzoneConfiguration()); - + ContainerHealthTask containerHealthTask = new ContainerHealthTask( + scmMock.getContainerManager(), scmMock.getScmServiceProvider(), + reconTaskStatusDao, + containerHealthSchemaManager, placementMock, reconTaskConfig, + reconContainerMetadataManager, + new OzoneConfiguration()); containerHealthTask.start(); - // Wait for the task to complete and ensure that updateContainerState is invoked for - // container IDs 1 and 2 to mark the containers as DELETED, since they are DELETED in SCM. - LambdaTestUtils.await(60000, 1000, () -> { - verify(containerManagerMock, times(1)) - .updateContainerState(ContainerID.valueOf(1L), HddsProtos.LifeCycleEvent.DELETE); - verify(containerManagerMock, times(1)) - .updateContainerState(ContainerID.valueOf(2L), HddsProtos.LifeCycleEvent.DELETE); - return true; - }); + // Wait for the task to identify unhealthy containers + LambdaTestUtils.await(6000, 1000, + () -> unhealthyContainersDao.count() == 3); + + // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states + List negativeSizeContainers = + unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); + assertThat(negativeSizeContainers).hasSize(3); } + private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index 4e9965638a1..7d55e612bad 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -127,58 +127,6 @@ public void testMissingRecordRetained() { )); } - @Test - public void testEmptyMissingRecordNotInsertedButLogged() { - // Create a container that is in EMPTY_MISSING state - Set replicas = new HashSet<>(); - ContainerHealthStatus status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, - reconContainerMetadataManager, CONF); - - // Initialize stats map - Map> unhealthyContainerStateStatsMap = new HashMap<>(); - initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); - - // Generate records for EMPTY_MISSING container - List records = ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( - status, (long) 345678, unhealthyContainerStateStatsMap); - - // Assert that no records are created for EMPTY_MISSING state - assertEquals(0, records.size()); - - // Assert that the EMPTY_MISSING state is logged - assertEquals(1, unhealthyContainerStateStatsMap.get(UnHealthyContainerStates.EMPTY_MISSING) - .getOrDefault(CONTAINER_COUNT, 0L)); - } - - @Test - public void testNegativeSizeRecordNotInsertedButLogged() { - // Simulate a container with NEGATIVE_SIZE state - when(container.getUsedBytes()).thenReturn(-10L); // Negative size - Set replicas = generateReplicas(container, CLOSED, CLOSED); - ContainerHealthStatus status = - new ContainerHealthStatus(container, replicas, placementPolicy, reconContainerMetadataManager, CONF); - - // Initialize stats map - Map> - unhealthyContainerStateStatsMap = new HashMap<>(); - 
initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); - - // Generate records for NEGATIVE_SIZE container - List records = - ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( - status, (long) 123456, unhealthyContainerStateStatsMap); - - // Assert that none of the records are for negative. - records.forEach(record -> assertFalse(record.getContainerState() - .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString()))); - - - // Assert that the NEGATIVE_SIZE state is logged - assertEquals(1, unhealthyContainerStateStatsMap.get( - UnHealthyContainerStates.NEGATIVE_SIZE).getOrDefault(CONTAINER_COUNT, 0L)); - } - - @Test public void testUnderReplicatedRecordRetainedAndUpdated() { // under replicated container @@ -448,9 +396,13 @@ public void testCorrectRecordsGenerated() { status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, reconContainerMetadataManager, CONF); - ContainerHealthTask.ContainerHealthRecords + records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 345678, unhealthyContainerStateStatsMap); + assertEquals(1, records.size()); + rec = records.get(0); + assertEquals(UnHealthyContainerStates.EMPTY_MISSING.toString(), + rec.getContainerState()); assertEquals(3, rec.getExpectedReplicaCount().intValue()); assertEquals(0, rec.getActualReplicaCount().intValue()); @@ -630,8 +582,6 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); - unhealthyContainerStateStatsMap.put( - UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private void logUnhealthyContainerStats( @@ -640,7 +590,7 @@ private void logUnhealthyContainerStats( // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY_MISSING containers, but dont add it to the unhealthy container table. + // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. 
unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index cc63663bf22..d49ff17f3bf 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -19,9 +19,11 @@ import javax.annotation.Priority; import javax.inject.Inject; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.Response; import javax.ws.rs.ext.Provider; import com.google.common.annotations.VisibleForTesting; @@ -39,7 +41,6 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Filter used to construct string to sign from unfiltered request. @@ -115,4 +116,10 @@ public SignatureInfo getSignatureInfo() { return signatureInfo; } + private WebApplicationException wrapOS3Exception(OS3Exception os3Exception) { + return new WebApplicationException(os3Exception.getErrorMessage(), + os3Exception, + Response.status(os3Exception.getHttpCode()) + .entity(os3Exception.toXml()).build()); + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java index 7614c4933a8..4f08527668c 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java @@ -154,6 +154,8 @@ private void setCertificate(String omServiceID, } } catch (CertificateException ce) { throw new IOException(ce); + } catch (IOException e) { + throw e; } finally { if (certClient != null) { certClient.close(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index a705420ca35..1b845c79aeb 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -69,7 +69,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.ETAG; @@ -222,9 +221,6 @@ public Response get( // example prefix: dir1/ key: dir123 continue; } - if (startAfter != null && count == 0 && Objects.equals(startAfter, next.getName())) { - continue; - } String relativeKeyName = next.getName().substring(prefix.length()); int depth = StringUtils.countMatches(relativeKeyName, delimiter); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java index 5881baa174b..cdaaa228ecd 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java @@ -34,9 +34,7 @@ import java.lang.reflect.Type; import javax.ws.rs.ext.Provider; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read CompleteMultipartUploadRequest wo namespace. @@ -71,10 +69,6 @@ public CompleteMultipartUploadRequest readFrom( MultivaluedMap multivaluedMap, InputStream inputStream) throws IOException, WebApplicationException { try { - if (inputStream.available() == 0) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage("You must specify at least one part")); - } - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); UnmarshallerHandler unmarshallerHandler = context.createUnmarshaller().getUnmarshallerHandler(); @@ -84,11 +78,8 @@ public CompleteMultipartUploadRequest readFrom( filter.setParent(xmlReader); filter.parse(new InputSource(inputStream)); return (CompleteMultipartUploadRequest) unmarshallerHandler.getResult(); - } catch (WebApplicationException e) { - throw e; } catch (Exception e) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + throw new WebApplicationException("Can't parse request body to XML.", e); } } - } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java index 775ec789f38..0c34c08091a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.ext.MessageBodyReader; @@ -33,9 +34,6 @@ import org.xml.sax.InputSource; import org.xml.sax.XMLReader; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; - /** * Custom unmarshaller to read MultiDeleteRequest w/wo namespace. 
*/ @@ -80,7 +78,7 @@ public MultiDeleteRequest readFrom(Class type, filter.parse(new InputSource(entityStream)); return (MultiDeleteRequest) unmarshallerHandler.getResult(); } catch (Exception e) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + throw new WebApplicationException("Can't parse request body to XML.", e); } } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java index c832915176b..3fa6149815e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java @@ -34,9 +34,7 @@ import java.lang.annotation.Annotation; import java.lang.reflect.Type; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read PutBucketAclRequest wo namespace. @@ -81,7 +79,7 @@ public S3BucketAcl readFrom( filter.parse(new InputSource(inputStream)); return (S3BucketAcl)(unmarshallerHandler.getResult()); } catch (Exception e) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + throw new WebApplicationException("Can't parse request body to XML.", e); } } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java index 3660457146f..810aa2085f4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java @@ -158,9 +158,4 @@ public String toXml() { this.getErrorMessage(), this.getResource(), this.getRequestId()); } - - /** Create a copy with specific message. */ - public OS3Exception withMessage(String message) { - return new OS3Exception(code, message, httpCode); - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java index d517154de80..43a1e6b7130 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java @@ -45,7 +45,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; /** - * Parser to process AWS V2 and V4 auth request. Creates string to sign and auth + * Parser to process AWS V2 & V4 auth request. Creates string to sign and auth * header. For more details refer to AWS documentation https://docs.aws * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html. 
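The unmarshaller hunks above revert the S3 gateway readers to wrapping any XML parse failure in a plain WebApplicationException("Can't parse request body to XML.", e). A small self-contained sketch of that JAX-RS MessageBodyReader pattern, assuming a hypothetical ExampleRequest type rather than the real S3 request classes:

```java
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import javax.ws.rs.Consumes;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.annotation.XmlRootElement;

/** Hypothetical request body type; stands in for the real S3 request classes. */
@XmlRootElement(name = "ExampleRequest")
class ExampleRequest {
}

/** Reads ExampleRequest from XML, turning parse failures into a generic error. */
@Provider
@Consumes(MediaType.APPLICATION_XML)
public class ExampleRequestUnmarshaller implements MessageBodyReader<ExampleRequest> {

  @Override
  public boolean isReadable(Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType) {
    return ExampleRequest.class.equals(type);
  }

  @Override
  public ExampleRequest readFrom(Class<ExampleRequest> type, Type genericType,
      Annotation[] annotations, MediaType mediaType,
      MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
      throws IOException, WebApplicationException {
    try {
      // Real readers cache the JAXBContext; created inline here to stay self-contained.
      JAXBContext context = JAXBContext.newInstance(ExampleRequest.class);
      return (ExampleRequest) context.createUnmarshaller().unmarshal(entityStream);
    } catch (JAXBException e) {
      // Mirrors the reverted behavior: any parse failure becomes one generic error.
      throw new WebApplicationException("Can't parse request body to XML.", e);
    }
  }
}
```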
**/ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java index 2746de8e5c4..be9ecce7c0f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.s3.signature; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +53,7 @@ public class Credential { * Sample credential value: * Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request * - * @throws MalformedResourceException + * @throws OS3Exception */ @SuppressWarnings("StringSplitter") public void parseCredential() throws MalformedResourceException { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java index fda298f27dc..d644162a8ec 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; @@ -118,11 +116,4 @@ public static S3StorageType toS3StorageType(String storageType) throw newError(INVALID_ARGUMENT, storageType, ex); } } - - public static WebApplicationException wrapOS3Exception(OS3Exception ex) { - return new WebApplicationException(ex.getErrorMessage(), ex, - Response.status(ex.getHttpCode()) - .entity(ex.toXml()) - .build()); - } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index e3e3537b1c3..41876c6e245 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -300,6 +301,21 @@ public List listKeys(String volumeName, String bucketName, return null; } + @Override + public List listTrash(String volumeName, String bucketName, + String startKeyName, + String keyPrefix, int maxKeys) + throws IOException { + return null; + } + + @Override + public boolean recoverTrash(String volumeName, String bucketName, + String keyName, String destinationBucket) + throws IOException { + return false; + } + @Override public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, String keyName) throws IOException { diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index d8c5599f304..04c1c8602cb 100644 --- a/hadoop-ozone/tools/pom.xml 
+++ b/hadoop-ozone/tools/pom.xml @@ -160,10 +160,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-tools - - org.apache.ratis - ratis-shell - info.picocli diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index 46a311e3546..991099f2702 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -108,6 +108,7 @@ private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) * Returns false if bucket is part of path but not a OBS bucket. * @param path * @return true if bucket is OBS bucket or not part of provided path. + * @throws IOException */ public boolean isNotValidBucketOrOBSBucket(String path) { OFSPath ofsPath = new OFSPath(path, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java index ca6fa428fe2..8a159adb644 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java @@ -38,7 +38,7 @@ public class CancelPrepareSubCommand implements Callable { @CommandLine.Option( names = {"-id", "--service-id"}, description = "Ozone Manager Service ID", - required = false + required = true ) private String omServiceId; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 5e1207519ab..0c38fbe33ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.apache.hadoop.ozone.utils.Filter; import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -56,11 +55,9 @@ import java.io.BufferedWriter; import java.io.IOException; import java.io.PrintWriter; -import java.lang.reflect.Field; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -124,19 +121,6 @@ public class DBScanner implements Callable, SubcommandWithParent { description = "Key at which iteration of the DB ends") private String endKey; - @CommandLine.Option(names = {"--fields"}, - description = "Comma-separated list of fields needed for each value. " + - "eg.) \"name,acls.type\" for showing name and type under acls.") - private String fieldsFilter; - - @CommandLine.Option(names = {"--filter"}, - description = "Comma-separated list of \"::\" where " + - " is any valid field of the record, " + - " is (EQUALS,MAX or MIN) and " + - " is the value of the field. " + - "eg.) 
\"dataSize:equals:1000\" for showing records having the value 1000 for dataSize") - private String filter; - @CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"}, description = "Datanode DB Schema Version: V1/V2/V3", defaultValue = "V3") @@ -307,7 +291,7 @@ private void processRecords(ManagedRocksIterator iterator, } Future future = threadPool.submit( new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, - withKey, schemaV3, fieldsFilter, filter)); + withKey, schemaV3)); futures.add(future); batch = new ArrayList<>(batchSize); sequenceId++; @@ -315,7 +299,7 @@ private void processRecords(ManagedRocksIterator iterator, } if (!batch.isEmpty()) { Future future = threadPool.submit(new Task(dbColumnFamilyDef, - batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter, filter)); + batch, logWriter, sequenceId, withKey, schemaV3)); futures.add(future); } @@ -481,99 +465,22 @@ private static class Task implements Callable { private final long sequenceId; private final boolean withKey; private final boolean schemaV3; - private String valueFields; - private String valueFilter; - @SuppressWarnings("checkstyle:parameternumber") Task(DBColumnFamilyDefinition dbColumnFamilyDefinition, ArrayList batch, LogWriter logWriter, - long sequenceId, boolean withKey, boolean schemaV3, String valueFields, String filter) { + long sequenceId, boolean withKey, boolean schemaV3) { this.dbColumnFamilyDefinition = dbColumnFamilyDefinition; this.batch = batch; this.logWriter = logWriter; this.sequenceId = sequenceId; this.withKey = withKey; this.schemaV3 = schemaV3; - this.valueFields = valueFields; - this.valueFilter = filter; - } - - Map getFieldSplit(List fields, Map fieldMap) { - int len = fields.size(); - if (fieldMap == null) { - fieldMap = new HashMap<>(); - } - if (len == 1) { - fieldMap.putIfAbsent(fields.get(0), null); - } else { - Map fieldMapGet = (Map) fieldMap.get(fields.get(0)); - if (fieldMapGet == null) { - fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), null)); - } else { - fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), fieldMapGet)); - } - } - return fieldMap; - } - - void getFilterSplit(List fields, Map fieldMap, Filter leafValue) throws IOException { - int len = fields.size(); - if (len == 1) { - Filter currentValue = fieldMap.get(fields.get(0)); - if (currentValue != null) { - err().println("Cannot pass multiple values for the same field and " + - "cannot have filter for both parent and child"); - throw new IOException("Invalid filter passed"); - } - fieldMap.put(fields.get(0), leafValue); - } else { - Filter fieldMapGet = fieldMap.computeIfAbsent(fields.get(0), k -> new Filter()); - if (fieldMapGet.getValue() != null) { - err().println("Cannot pass multiple values for the same field and " + - "cannot have filter for both parent and child"); - throw new IOException("Invalid filter passed"); - } - Map nextLevel = fieldMapGet.getNextLevel(); - if (nextLevel == null) { - fieldMapGet.setNextLevel(new HashMap<>()); - } - getFilterSplit(fields.subList(1, len), fieldMapGet.getNextLevel(), leafValue); - } } @Override public Void call() { try { ArrayList results = new ArrayList<>(batch.size()); - Map fieldsSplitMap = new HashMap<>(); - - if (valueFields != null) { - for (String field : valueFields.split(",")) { - String[] subfields = field.split("\\."); - fieldsSplitMap = getFieldSplit(Arrays.asList(subfields), fieldsSplitMap); - } - } - - Map fieldsFilterSplitMap = new HashMap<>(); - if (valueFilter != null) { - for (String field : 
valueFilter.split(",")) { - String[] fieldValue = field.split(":"); - if (fieldValue.length != 3) { - err().println("Error: Invalid format for filter \"" + field - + "\". Usage: ::. Ignoring filter passed"); - } else { - Filter filter = new Filter(fieldValue[1], fieldValue[2]); - if (filter.getOperator() == null) { - err().println("Error: Invalid format for filter \"" + filter - + "\". can be one of [EQUALS,MIN,MAX]. Ignoring filter passed"); - } else { - String[] subfields = fieldValue[0].split("\\."); - getFilterSplit(Arrays.asList(subfields), fieldsFilterSplitMap, filter); - } - } - } - } - for (ByteArrayKeyValue byteArrayKeyValue : batch) { StringBuilder sb = new StringBuilder(); if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) { @@ -608,182 +515,16 @@ public Void call() { Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(byteArrayKeyValue.getValue()); - - if (valueFilter != null && - !checkFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsFilterSplitMap)) { - // the record doesn't pass the filter - continue; - } - if (valueFields != null) { - Map filteredValue = new HashMap<>(); - filteredValue.putAll(getFieldsFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); - sb.append(WRITER.writeValueAsString(filteredValue)); - } else { - sb.append(WRITER.writeValueAsString(o)); - } - + sb.append(WRITER.writeValueAsString(o)); results.add(sb.toString()); } logWriter.log(results, sequenceId); - } catch (IOException e) { + } catch (Exception e) { exception = true; LOG.error("Exception parse Object", e); } return null; } - - boolean checkFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) - throws IOException { - for (Map.Entry field : fieldsSplitMap.entrySet()) { - try { - Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); - Object valueObject = valueClassField.get(obj); - Filter fieldValue = field.getValue(); - - if (valueObject == null) { - // there is no such field in the record. This filter will be ignored for the current record. - continue; - } - if (fieldValue == null) { - err().println("Malformed filter. Check input"); - throw new IOException("Invalid filter passed"); - } else if (fieldValue.getNextLevel() == null) { - // reached the end of fields hierarchy, check if they match the filter - // Currently, only equals operation is supported - if (Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator()) && - !String.valueOf(valueObject).equals(fieldValue.getValue())) { - return false; - } else if (!Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator())) { - err().println("Only EQUALS operator is supported currently."); - throw new IOException("Invalid filter passed"); - } - } else { - Map subfields = fieldValue.getNextLevel(); - if (Collection.class.isAssignableFrom(valueObject.getClass())) { - if (!checkFilteredObjectCollection((Collection) valueObject, subfields)) { - return false; - } - } else if (Map.class.isAssignableFrom(valueObject.getClass())) { - Map valueObjectMap = (Map) valueObject; - boolean flag = false; - for (Map.Entry ob : valueObjectMap.entrySet()) { - boolean subflag; - if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { - subflag = checkFilteredObjectCollection((Collection)ob.getValue(), subfields); - } else { - subflag = checkFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); - } - if (subflag) { - // atleast one item in the map/list of the record has matched the filter, - // so record passes the filter. 
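The removed DBScanner code above parses the --filter option as comma-separated field:operator:value triples (for example "dataSize:equals:1000"), with dotted field names addressing nested fields such as "acls.type". A rough standalone sketch of that parsing step, using illustrative names rather than the DBScanner internals:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Locale;

// Sketch of one parsed filter triple: field path, operator, expected value.
final class FilterSpec {
  enum Operator { EQUALS, MIN, MAX }

  final List<String> fieldPath;   // e.g. ["acls", "type"] for a nested field
  final Operator operator;
  final String value;

  private FilterSpec(List<String> fieldPath, Operator operator, String value) {
    this.fieldPath = fieldPath;
    this.operator = operator;
    this.value = value;
  }

  /** Parses one "field:operator:value" triple. */
  static FilterSpec parse(String spec) {
    String[] parts = spec.split(":");
    if (parts.length != 3) {
      throw new IllegalArgumentException(
          "Invalid filter \"" + spec + "\". Usage: <field>:<operator>:<value>");
    }
    List<String> path = Arrays.asList(parts[0].split("\\."));
    Operator op = Operator.valueOf(parts[1].toUpperCase(Locale.ROOT));
    return new FilterSpec(path, op, parts[2]);
  }

  public static void main(String[] args) {
    FilterSpec f = FilterSpec.parse("acls.type:equals:USER");
    System.out.println(f.fieldPath + " " + f.operator + " " + f.value);
    // [acls, type] EQUALS USER
  }
}
```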
- flag = true; - break; - } - } - if (!flag) { - // none of the items in the map/list passed the filter => record doesn't pass the filter - return false; - } - } else { - if (!checkFilteredObject(valueObject, valueClassField.getType(), subfields)) { - return false; - } - } - } - } catch (NoSuchFieldException ex) { - err().println("ERROR: no such field: " + field); - exception = true; - return false; - } catch (IllegalAccessException e) { - err().println("ERROR: Cannot get field from object: " + field); - exception = true; - return false; - } catch (Exception ex) { - err().println("ERROR: field: " + field + ", ex: " + ex); - exception = true; - return false; - } - } - return true; - } - - boolean checkFilteredObjectCollection(Collection valueObject, Map fields) - throws NoSuchFieldException, IllegalAccessException, IOException { - for (Object ob : valueObject) { - if (checkFilteredObject(ob, ob.getClass(), fields)) { - return true; - } - } - return false; - } - - Map getFieldsFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { - Map valueMap = new HashMap<>(); - for (Map.Entry field : fieldsSplitMap.entrySet()) { - try { - Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); - Object valueObject = valueClassField.get(obj); - Map subfields = (Map) field.getValue(); - - if (subfields == null) { - valueMap.put(field.getKey(), valueObject); - } else { - if (Collection.class.isAssignableFrom(valueObject.getClass())) { - List subfieldObjectsList = - getFieldsFilteredObjectCollection((Collection) valueObject, subfields); - valueMap.put(field.getKey(), subfieldObjectsList); - } else if (Map.class.isAssignableFrom(valueObject.getClass())) { - Map subfieldObjectsMap = new HashMap<>(); - Map valueObjectMap = (Map) valueObject; - for (Map.Entry ob : valueObjectMap.entrySet()) { - Object subfieldValue; - if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { - subfieldValue = getFieldsFilteredObjectCollection((Collection)ob.getValue(), subfields); - } else { - subfieldValue = getFieldsFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); - } - subfieldObjectsMap.put(ob.getKey(), subfieldValue); - } - valueMap.put(field.getKey(), subfieldObjectsMap); - } else { - valueMap.put(field.getKey(), - getFieldsFilteredObject(valueObject, valueClassField.getType(), subfields)); - } - } - } catch (NoSuchFieldException ex) { - err().println("ERROR: no such field: " + field); - } catch (IllegalAccessException e) { - err().println("ERROR: Cannot get field from object: " + field); - } - } - return valueMap; - } - - List getFieldsFilteredObjectCollection(Collection valueObject, Map fields) - throws NoSuchFieldException, IllegalAccessException { - List subfieldObjectsList = new ArrayList<>(); - for (Object ob : valueObject) { - Object subfieldValue = getFieldsFilteredObject(ob, ob.getClass(), fields); - subfieldObjectsList.add(subfieldValue); - } - return subfieldObjectsList; - } - - Field getRequiredFieldFromAllFields(Class clazz, String fieldName) throws NoSuchFieldException { - List classFieldList = ValueSchema.getAllFields(clazz); - Field classField = null; - for (Field f : classFieldList) { - if (f.getName().equals(fieldName)) { - classField = f; - break; - } - } - if (classField == null) { - throw new NoSuchFieldException(); - } - classField.setAccessible(true); - return classField; - } } private static class ByteArrayKeyValue { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java index b06be2aff53..a5029b3e6b9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java @@ -88,7 +88,7 @@ public Void call() throws Exception { String dbPath = parent.getDbPath(); Map fields = new HashMap<>(); - success = getValueFields(dbPath, fields, depth, tableName, dnDBSchemaVersion); + success = getValueFields(dbPath, fields); out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(fields)); @@ -101,8 +101,7 @@ public Void call() throws Exception { return null; } - public static boolean getValueFields(String dbPath, Map valueSchema, int d, String table, - String dnDBSchemaVersion) { + private boolean getValueFields(String dbPath, Map valueSchema) { dbPath = removeTrailingSlashIfNeeded(dbPath); DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); @@ -112,19 +111,19 @@ public static boolean getValueFields(String dbPath, Map valueSch return false; } final DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(table); + dbDefinition.getColumnFamily(tableName); if (columnFamilyDefinition == null) { - err().print("Error: Table with name '" + table + "' not found"); + err().print("Error: Table with name '" + tableName + "' not found"); return false; } Class c = columnFamilyDefinition.getValueType(); - valueSchema.put(c.getSimpleName(), getFieldsStructure(c, d)); + valueSchema.put(c.getSimpleName(), getFieldsStructure(c, depth)); return true; } - private static Object getFieldsStructure(Class clazz, int currentDepth) { + private Object getFieldsStructure(Class clazz, int currentDepth) { if (clazz.isPrimitive() || String.class.equals(clazz)) { return clazz.getSimpleName(); } else if (currentDepth == 0) { @@ -149,7 +148,7 @@ private static Object getFieldsStructure(Class clazz, int currentDepth) { } } - public static List getAllFields(Class clazz) { + private List getAllFields(Class clazz) { // NOTE: Schema of interface type, like ReplicationConfig, cannot be fetched. // An empty list "[]" will be shown for such types of fields. if (clazz == null) { @@ -177,7 +176,7 @@ public Class getParentType() { return RDBParser.class; } - private static String removeTrailingSlashIfNeeded(String dbPath) { + private String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 3627b917f00..20acad0562a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -290,10 +290,6 @@ public void init() { //replace environment variables to support multi-node execution prefix = resolvePrefix(prefix); } - if (duration != null && !allowDuration()) { - LOG.warn("--duration is ignored"); - duration = null; - } if (duration != null) { durationInSecond = TimeDurationUtil.getTimeDurationHelper( "--runtime", duration, TimeUnit.SECONDS); @@ -558,15 +554,6 @@ public String getPrefix() { return prefix; } - /** - * Whether to enable Duration. - * If enabled, the command will load the --duration option. 
- * If not enabled, the command will not load the --duration option. - */ - public boolean allowDuration() { - return true; - } - public MetricRegistry getMetrics() { return metrics; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java index 66bc7943676..da16026210f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java @@ -23,7 +23,9 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; /** @@ -107,17 +109,20 @@ private void doFlushOrSync(OutputStream outputStream) throws IOException { // noop break; case HFLUSH: - if (outputStream instanceof Syncable) { - ((Syncable) outputStream).hflush(); + if (StoreImplementationUtils.hasCapability( + outputStream, StreamCapabilities.HSYNC)) { + ((Syncable)outputStream).hflush(); } break; case HSYNC: - if (outputStream instanceof Syncable) { - ((Syncable) outputStream).hsync(); + if (StoreImplementationUtils.hasCapability( + outputStream, StreamCapabilities.HSYNC)) { + ((Syncable)outputStream).hsync(); } break; default: - throw new IllegalArgumentException("Unsupported sync option" + flushOrSync); + throw new IllegalArgumentException("Unsupported sync option" + + flushOrSync); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java index a7527952ca3..f83b2a1a4a9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; +import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; @@ -150,14 +150,11 @@ public Void call() throws Exception { } encodedContainerToken = scmClient.getEncodedContainerToken(containerID); XceiverClientFactory xceiverClientManager; - OzoneManagerProtocolClientSideTranslatorPB omClient; if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { - omClient = createOmClient(configuration, null); - CACertificateProvider caCerts = () -> omClient.getServiceInfo().provideCACerts(); + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, configuration); xceiverClientManager = new XceiverClientCreator(configuration, new ClientTrustManager(caCerts, null)); } else { - omClient = null; xceiverClientManager = new XceiverClientCreator(configuration); } clients = new ArrayList<>(numClients); @@ -172,9 +169,6 @@ public Void call() throws Exception { try { runTests(this::sendRPCReq); } finally { - if (omClient != null) { - omClient.close(); - } for 
(XceiverClientSpi client : clients) { xceiverClientManager.releaseClient(client, false); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index 5bc2c409318..3eb879d5c06 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -31,7 +31,7 @@ import java.util.concurrent.atomic.AtomicLong; /** - * Directory and File Generator tool to test OM performance. + * Directory & File Generator tool to test OM performance. */ @Command(name = "dtsg", aliases = "dfs-tree-generator", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java index 27ebc877633..a3e21d58e2f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java @@ -57,11 +57,6 @@ public class OmBucketGenerator extends BaseFreonGenerator private Timer bucketCreationTimer; - @Override - public boolean allowDuration() { - return false; - } - @Override public Void call() throws Exception { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index 58b62d22b98..c964676f266 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -355,7 +355,11 @@ public Void call() throws Exception { // wait until all keys are added or exception occurred. while ((numberOfKeysAdded.get() != totalKeyCount) && exception == null) { - Thread.sleep(CHECK_INTERVAL_MILLIS); + try { + Thread.sleep(CHECK_INTERVAL_MILLIS); + } catch (InterruptedException e) { + throw e; + } } executor.shutdown(); executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); @@ -369,7 +373,11 @@ public Void call() throws Exception { if (validateExecutor != null) { while (!validationQueue.isEmpty()) { - Thread.sleep(CHECK_INTERVAL_MILLIS); + try { + Thread.sleep(CHECK_INTERVAL_MILLIS); + } catch (InterruptedException e) { + throw e; + } } validateExecutor.shutdown(); validateExecutor.awaitTermination(Integer.MAX_VALUE, @@ -413,7 +421,11 @@ private void doCleanObjects() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while ((numberOfBucketsCleaned.get() != totalBucketCount) && exception == null) { - Thread.sleep(CHECK_INTERVAL_MILLIS); + try { + Thread.sleep(CHECK_INTERVAL_MILLIS); + } catch (InterruptedException e) { + throw e; + } } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java index 0233c14470a..ef7a85a4121 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java @@ -28,9 +28,9 @@ * Generate buckets via the s3 interface. 
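The ContentGenerator hunk above restores the capability-checked flush path: the stream is asked whether it advertises the hsync capability before the Syncable methods are invoked. A short sketch of that pattern, assuming the Hadoop classes imported in the hunk:

```java
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.impl.StoreImplementationUtils;

/** Capability-checked durability helper; a sketch, not the Freon generator. */
final class FlushHelper {

  /** Persists buffered data only when the stream advertises hsync support. */
  static void hsyncIfSupported(OutputStream out) throws IOException {
    // hasCapability() consults the StreamCapabilities contract, which is a
    // stronger signal than a plain "instanceof Syncable" check on a wrapper stream.
    if (StoreImplementationUtils.hasCapability(out, StreamCapabilities.HSYNC)) {
      ((Syncable) out).hsync();
    }
  }

  private FlushHelper() {
  }
}
```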
* * For a secure cluster, - * $> init user keytab - * $> kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM - * $> eval $(ozone s3 getsecret -e) + * $> init user keytab + * $> kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM + * $> eval $(ozone s3 getsecret -e) * for getting and exporting access_key_id and secret_access_key * to freon shell test environment * secret access key. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java index b0ac5b0033e..a7a74c2e372 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java @@ -67,8 +67,8 @@ public static void main(String[] args) throws IOException { /** * Generates Container Id to Blocks and BlockDetails mapping. * @param configuration @{@link OzoneConfiguration} - * @return {@code Map>> - * Map of ContainerId -> (Block, Block info)} + * @return Map>> + * Map of ContainerId -> (Block, Block info) * @throws IOException */ public Map>> diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java deleted file mode 100644 index aca41844a18..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.repair; - -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; -import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; -import org.apache.hadoop.hdds.utils.db.DBDefinition; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.debug.DBDefinitionFactory; -import org.apache.hadoop.ozone.debug.RocksDBUtils; -import java.security.cert.CertificateFactory; -import org.kohsuke.MetaInfServices; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDBException; -import picocli.CommandLine; - -import java.io.IOException; -import java.io.PrintWriter; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; -import java.security.cert.CertPath; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.Map; -import java.util.HashMap; -import java.util.List; -import java.util.ArrayList; -import java.util.Optional; -import java.util.Arrays; -import java.util.concurrent.Callable; - -import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_SCM_CERTS; -import static org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient.CERT_FILE_NAME_FORMAT; -import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.removeTrailingSlashIfNeeded; - -/** - * In case of accidental deletion of SCM certificates from local storage, - * this tool restores the certs that are persisted into the SCM DB. - * Note that this will only work if the SCM has persisted certs in its RocksDB - * and private keys of the SCM are intact. 
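The removed RecoverSCMCertificate tool above opens the SCM RocksDB read-only, locates the VALID_SCM_CERTS column family, and iterates it to collect the persisted certificates. A stripped-down sketch of that scan using the plain RocksDB Java API; the real tool goes through Ozone's Managed* wrappers and the table codecs, which are omitted here:

```java
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

// Read-only scan of one column family; key/value decoding is intentionally omitted.
final class CertTableScan {

  static void scan(String dbPath, String tableName) throws RocksDBException {
    List<byte[]> cfNames;
    try (Options options = new Options()) {
      cfNames = RocksDB.listColumnFamilies(options, dbPath);
    }
    List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
    for (byte[] name : cfNames) {
      descriptors.add(new ColumnFamilyDescriptor(name));
    }
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (RocksDB db = RocksDB.openReadOnly(dbPath, descriptors, handles)) {
      ColumnFamilyHandle certs = null;
      for (ColumnFamilyHandle h : handles) {
        if (tableName.equals(new String(h.getName(), StandardCharsets.UTF_8))) {
          certs = h;
          break;
        }
      }
      if (certs == null) {
        throw new IllegalStateException("Table " + tableName + " not found");
      }
      try (RocksIterator it = db.newIterator(certs)) {
        for (it.seekToFirst(); it.isValid(); it.next()) {
          System.out.println("key bytes: " + it.key().length
              + ", value bytes: " + it.value().length);
        }
      }
    }
  }

  private CertTableScan() {
  }
}
```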
- */ -@CommandLine.Command( - name = "cert-recover", - description = "Recover Deleted SCM Certificate from RocksDB") -@MetaInfServices(SubcommandWithParent.class) -public class RecoverSCMCertificate implements Callable, SubcommandWithParent { - - @CommandLine.Option(names = {"--db"}, - required = true, - description = "SCM DB Path") - private String dbPath; - - @CommandLine.ParentCommand - private OzoneRepair parent; - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @Override - public Class getParentType() { - return OzoneRepair.class; - } - - private PrintWriter err() { - return spec.commandLine().getErr(); - } - - private PrintWriter out() { - return spec.commandLine().getOut(); - } - - @Override - public Void call() throws Exception { - dbPath = removeTrailingSlashIfNeeded(dbPath); - String tableName = VALID_SCM_CERTS.getName(); - DBDefinition dbDefinition = - DBDefinitionFactory.getDefinition(Paths.get(dbPath), new OzoneConfiguration()); - if (dbDefinition == null) { - throw new Exception("Error: Incorrect DB Path"); - } - DBColumnFamilyDefinition columnFamilyDefinition = - getDbColumnFamilyDefinition(tableName, dbDefinition); - - try { - List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); - final List cfHandleList = new ArrayList<>(); - byte[] tableNameBytes = tableName.getBytes(StandardCharsets.UTF_8); - ColumnFamilyHandle cfHandle = null; - try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, cfDescList, - cfHandleList)) { - cfHandle = getColumnFamilyHandle(cfHandleList, tableNameBytes); - SecurityConfig securityConfig = new SecurityConfig(parent.getOzoneConf()); - - Map allCerts = getAllCerts(columnFamilyDefinition, cfHandle, db); - out().println("All Certs in DB : " + allCerts.keySet()); - String hostName = InetAddress.getLocalHost().getHostName(); - out().println("Host: " + hostName); - - X509Certificate subCertificate = getSubCertificate(allCerts, hostName); - X509Certificate rootCertificate = getRootCertificate(allCerts); - - out().println("Sub cert serialID for this host: " + subCertificate.getSerialNumber().toString()); - out().println("Root cert serialID: " + rootCertificate.getSerialNumber().toString()); - - boolean isRootCA = false; - - String caPrincipal = rootCertificate.getSubjectDN().getName(); - if (caPrincipal.contains(hostName)) { - isRootCA = true; - } - storeCerts(subCertificate, rootCertificate, isRootCA, securityConfig); - } - } catch (RocksDBException | CertificateException exception) { - err().print("Failed to recover scm cert"); - } - return null; - } - - private static ColumnFamilyHandle getColumnFamilyHandle( - List cfHandleList, byte[] tableNameBytes) throws Exception { - ColumnFamilyHandle cfHandle = null; - for (ColumnFamilyHandle cf : cfHandleList) { - if (Arrays.equals(cf.getName(), tableNameBytes)) { - cfHandle = cf; - break; - } - } - if (cfHandle == null) { - throw new Exception("Error: VALID_SCM_CERTS table not found in DB"); - } - return cfHandle; - } - - private static X509Certificate getRootCertificate( - Map allCerts) throws Exception { - Optional cert = allCerts.values().stream().filter( - c -> c.getSubjectDN().getName() - .contains(OzoneConsts.SCM_ROOT_CA_PREFIX)).findFirst(); - if (!cert.isPresent()) { - throw new Exception("Root CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); - } - return cert.get(); - } - - - private static X509Certificate getSubCertificate( - Map allCerts, String hostName) throws Exception { - Optional cert = 
allCerts.values().stream().filter( - c -> c.getSubjectDN().getName() - .contains(OzoneConsts.SCM_SUB_CA_PREFIX) && c.getSubjectDN() - .getName().contains(hostName)).findFirst(); - if (!cert.isPresent()) { - throw new Exception("Sub CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); - } - return cert.get(); - } - - private static Map getAllCerts( - DBColumnFamilyDefinition columnFamilyDefinition, - ColumnFamilyHandle cfHandle, ManagedRocksDB db) throws IOException, RocksDBException { - Map allCerts = new HashMap<>(); - ManagedRocksIterator rocksIterator = ManagedRocksIterator.managed(db.get().newIterator(cfHandle)); - rocksIterator.get().seekToFirst(); - while (rocksIterator.get().isValid()) { - BigInteger id = (BigInteger) columnFamilyDefinition.getKeyCodec() - .fromPersistedFormat(rocksIterator.get().key()); - X509Certificate certificate = - (X509Certificate) columnFamilyDefinition.getValueCodec() - .fromPersistedFormat(rocksIterator.get().value()); - allCerts.put(id, certificate); - rocksIterator.get().next(); - } - return allCerts; - } - - private static DBColumnFamilyDefinition getDbColumnFamilyDefinition( - String tableName, DBDefinition dbDefinition) throws Exception { - DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(tableName); - if (columnFamilyDefinition == null) { - throw new Exception( - "Error: VALID_SCM_CERTS table no found in Definition"); - } - return columnFamilyDefinition; - } - - private void storeCerts(X509Certificate scmCertificate, - X509Certificate rootCertificate, boolean isRootCA, SecurityConfig securityConfig) - throws CertificateException, IOException { - CertificateCodec certCodec = - new CertificateCodec(securityConfig, SCMCertificateClient.COMPONENT_NAME); - - out().println("Writing certs to path : " + certCodec.getLocation().toString()); - - CertPath certPath = addRootCertInPath(scmCertificate, rootCertificate); - CertPath rootCertPath = getRootCertPath(rootCertificate); - String encodedCert = CertificateCodec.getPEMEncodedString(certPath); - String certName = String.format(CERT_FILE_NAME_FORMAT, - CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber().toString()); - certCodec.writeCertificate(certName, encodedCert); - - String rootCertName = String.format(CERT_FILE_NAME_FORMAT, - CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber().toString()); - String encodedRootCert = CertificateCodec.getPEMEncodedString(rootCertPath); - certCodec.writeCertificate(rootCertName, encodedRootCert); - - certCodec.writeCertificate(certCodec.getLocation().toAbsolutePath(), - securityConfig.getCertificateFileName(), encodedCert); - - if (isRootCA) { - CertificateCodec rootCertCodec = - new CertificateCodec(securityConfig, OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME); - out().println("Writing root certs to path : " + rootCertCodec.getLocation().toString()); - rootCertCodec.writeCertificate(rootCertCodec.getLocation().toAbsolutePath(), - securityConfig.getCertificateFileName(), encodedRootCert); - } - } - - public CertPath addRootCertInPath(X509Certificate scmCert, - X509Certificate rootCert) throws CertificateException { - ArrayList updatedList = new ArrayList<>(); - updatedList.add(scmCert); - updatedList.add(rootCert); - CertificateFactory certFactory = - CertificateCodec.getCertFactory(); - return certFactory.generateCertPath(updatedList); - } - - public CertPath getRootCertPath(X509Certificate rootCert) - throws CertificateException { - ArrayList updatedList = new ArrayList<>(); - 
updatedList.add(rootCert); - CertificateFactory factory = CertificateCodec.getCertFactory(); - return factory.generateCertPath(updatedList); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java deleted file mode 100644 index 5f21b739c81..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.repair.quota; - -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory; -import org.apache.hadoop.ozone.om.protocolPB.OmTransport; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.repair.OzoneRepair; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.protocol.ClientId; -import org.kohsuke.MetaInfServices; -import picocli.CommandLine; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -/** - * Ozone Repair CLI for quota. 
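The removed QuotaRepair command above falls back to "the only configured OM service ID" when neither a service ID nor an OM host is given, and fails otherwise. A tiny sketch of that resolution rule with illustrative names:

```java
import java.util.Arrays;
import java.util.Collection;

/** Sketch of the "exactly one configured service ID, otherwise fail" rule. */
final class ServiceIdResolver {

  static String onlyServiceIdOrThrow(Collection<String> configuredIds) {
    if (configuredIds.size() != 1) {
      throw new IllegalArgumentException("There is no Ozone Manager service ID "
          + "specified, and the configuration defines " + configuredIds.size()
          + " service IDs instead of exactly one.");
    }
    return configuredIds.iterator().next();
  }

  public static void main(String[] args) {
    System.out.println(onlyServiceIdOrThrow(Arrays.asList("omservice1"))); // omservice1
  }

  private ServiceIdResolver() {
  }
}
```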
- */ -@CommandLine.Command(name = "quota", - description = "Operational tool to repair quota in OM DB.") -@MetaInfServices(SubcommandWithParent.class) -public class QuotaRepair implements Callable, SubcommandWithParent { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.ParentCommand - private OzoneRepair parent; - - @Override - public Void call() { - GenericCli.missingSubcommand(spec); - return null; - } - - public OzoneManagerProtocolClientSideTranslatorPB createOmClient( - String omServiceID, - String omHost, - boolean forceHA - ) throws Exception { - OzoneConfiguration conf = parent.getOzoneConf(); - if (omHost != null && !omHost.isEmpty()) { - omServiceID = null; - conf.set(OZONE_OM_ADDRESS_KEY, omHost); - } else if (omServiceID == null || omServiceID.isEmpty()) { - omServiceID = getTheOnlyConfiguredOmServiceIdOrThrow(); - } - RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - String clientId = ClientId.randomId().toString(); - if (!forceHA || (forceHA && OmUtils.isOmHAServiceId(conf, omServiceID))) { - OmTransport omTransport = new Hadoop3OmTransportFactory() - .createOmTransport(conf, getUser(), omServiceID); - return new OzoneManagerProtocolClientSideTranslatorPB(omTransport, - clientId); - } else { - throw new OzoneClientException("This command works only on OzoneManager" + - " HA cluster. Service ID specified does not match" + - " with " + OZONE_OM_SERVICE_IDS_KEY + " defined in the " + - "configuration. Configured " + OZONE_OM_SERVICE_IDS_KEY + " are " + - conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY) + "\n"); - } - } - - private String getTheOnlyConfiguredOmServiceIdOrThrow() { - if (getConfiguredServiceIds().size() != 1) { - throw new IllegalArgumentException("There is no Ozone Manager service ID " - + "specified, but there are either zero, or more than one service ID" - + "configured."); - } - return getConfiguredServiceIds().iterator().next(); - } - - private Collection getConfiguredServiceIds() { - OzoneConfiguration conf = parent.getOzoneConf(); - Collection omServiceIds = - conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); - return omServiceIds; - } - - public UserGroupInformation getUser() throws IOException { - return UserGroupInformation.getCurrentUser(); - } - - protected OzoneRepair getParent() { - return parent; - } - - @Override - public Class getParentType() { - return OzoneRepair.class; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java deleted file mode 100644 index a78d248e055..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *

    http://www.apache.org/licenses/LICENSE-2.0 - * - *

    Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.repair.quota; - -import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.kohsuke.MetaInfServices; -import picocli.CommandLine; - - -/** - * Tool to get status of last triggered quota repair. - */ -@CommandLine.Command( - name = "status", - description = "CLI to get the status of last trigger quota repair if available.", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class -) -@MetaInfServices(SubcommandWithParent.class) -public class QuotaStatus implements Callable, SubcommandWithParent { - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - - @CommandLine.Option( - names = {"--service-id", "--om-service-id"}, - description = "Ozone Manager Service ID", - required = false - ) - private String omServiceId; - - @CommandLine.Option( - names = {"--service-host"}, - description = "Ozone Manager Host. If OM HA is enabled, use --service-id instead. " - + "If you must use --service-host with OM HA, this must point directly to the leader OM. " - + "This option is required when --service-id is not provided or when HA is not enabled." - ) - private String omHost; - - @CommandLine.ParentCommand - private QuotaRepair parent; - - @Override - public Void call() throws Exception { - OzoneManagerProtocol ozoneManagerClient = - parent.createOmClient(omServiceId, omHost, false); - System.out.println(ozoneManagerClient.getQuotaRepairStatus()); - return null; - } - - protected QuotaRepair getParent() { - return parent; - } - - @Override - public Class getParentType() { - return QuotaRepair.class; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java deleted file mode 100644 index 19ad92340c0..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *

    http://www.apache.org/licenses/LICENSE-2.0 - * - *

    Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.repair.quota; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.kohsuke.MetaInfServices; -import picocli.CommandLine; - -/** - * Tool to trigger quota repair. - */ -@CommandLine.Command( - name = "start", - description = "CLI to trigger quota repair.", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class -) -@MetaInfServices(SubcommandWithParent.class) -public class QuotaTrigger implements Callable, SubcommandWithParent { - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - - @CommandLine.ParentCommand - private QuotaRepair parent; - - @CommandLine.Option( - names = {"--service-id", "--om-service-id"}, - description = "Ozone Manager Service ID", - required = false - ) - private String omServiceId; - - @CommandLine.Option( - names = {"--service-host"}, - description = "Ozone Manager Host. If OM HA is enabled, use --service-id instead. " - + "If you must use --service-host with OM HA, this must point directly to the leader OM. " - + "This option is required when --service-id is not provided or when HA is not enabled." - ) - private String omHost; - - @CommandLine.Option(names = {"--buckets"}, - required = false, - description = "start quota repair for specific buckets. Input will be list of uri separated by comma as" + - " //[,...]") - private String buckets; - - @Override - public Void call() throws Exception { - List bucketList = Collections.emptyList(); - if (StringUtils.isNotEmpty(buckets)) { - bucketList = Arrays.asList(buckets.split(",")); - } - - OzoneManagerProtocol ozoneManagerClient = - parent.createOmClient(omServiceId, omHost, false); - try { - ozoneManagerClient.startQuotaRepair(bucketList); - System.out.println(ozoneManagerClient.getQuotaRepairStatus()); - } catch (Exception ex) { - System.out.println(ex.getMessage()); - } - return null; - } - - protected QuotaRepair getParent() { - return parent; - } - - @Override - public Class getParentType() { - return QuotaRepair.class; - } - -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java deleted file mode 100644 index 9a433b24397..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Ozone Repair tools. - */ -package org.apache.hadoop.ozone.repair.quota; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java deleted file mode 100644 index 5bc98268064..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.shell; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.ratis.shell.cli.sh.RatisShell; - -import picocli.CommandLine; - -/** - * Ozone Ratis Command line tool. - */ -@CommandLine.Command(name = "ozone ratis", - description = "Shell for running Ratis commands", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class OzoneRatis extends Shell { - - public OzoneRatis() { - super(OzoneRatis.class); - } - - /** - * Main for the OzoneRatis Command handling. - * - * @param argv - System Args Strings[] - */ - public static void main(String[] argv) throws Exception { - new OzoneRatis().run(argv); - } - - @Override - public int execute(String[] argv) { - TracingUtil.initTracing("shell", createOzoneConfiguration()); - String spanName = "ozone ratis" + String.join(" ", argv); - return TracingUtil.executeInNewSpan(spanName, () -> { - // TODO: When Ozone has RATIS-2155, update this line to use the RatisShell.Builder - // in order to setup TLS and other confs. - final RatisShell shell = new RatisShell(System.out); - return shell.run(argv); - }); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java index c06d29a7f93..8fbab644c0e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java @@ -21,7 +21,7 @@ import picocli.CommandLine; /** - * Option for {@link org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType}. + * Option for {@link OzoneObj.StoreType}. */ public class StoreTypeOption implements CommandLine.ITypeConverter { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java index f19548a1fa7..63b2b425c64 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java @@ -19,15 +19,10 @@ package org.apache.hadoop.ozone.shell.checknative; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; +import org.apache.hadoop.util.NativeCodeLoader; import picocli.CommandLine; -import java.util.Collections; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - /** * CLI command to check if native libraries are loaded. 
*/ @@ -41,12 +36,12 @@ public static void main(String[] argv) { @Override public Void call() throws Exception { - boolean nativeHadoopLoaded = org.apache.hadoop.util.NativeCodeLoader.isNativeCodeLoaded(); + boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); String hadoopLibraryName = ""; String isalDetail = ""; boolean isalLoaded = false; if (nativeHadoopLoaded) { - hadoopLibraryName = org.apache.hadoop.util.NativeCodeLoader.getLibraryName(); + hadoopLibraryName = NativeCodeLoader.getLibraryName(); isalDetail = ErasureCodeNative.getLoadingFailureReason(); if (isalDetail != null) { @@ -55,21 +50,12 @@ public Void call() throws Exception { isalDetail = ErasureCodeNative.getLibraryName(); isalLoaded = true; } + } System.out.println("Native library checking:"); System.out.printf("hadoop: %b %s%n", nativeHadoopLoaded, hadoopLibraryName); System.out.printf("ISA-L: %b %s%n", isalLoaded, isalDetail); - - // Attempt to load the rocks-tools lib - boolean nativeRocksToolsLoaded = NativeLibraryLoader.getInstance().loadLibrary( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME, - Collections.singletonList(ManagedRocksObjectUtils.getRocksDBLibFileName())); - String rocksToolsDetail = ""; - if (nativeRocksToolsLoaded) { - rocksToolsDetail = NativeLibraryLoader.getJniLibraryFileName(); - } - System.out.printf("rocks-tools: %b %s%n", nativeRocksToolsLoaded, rocksToolsDetail); return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java index 35095dd7ff2..833f4f7e779 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java @@ -71,7 +71,7 @@ public class PutKeyHandler extends KeyHandler { @Option(names = "--expectedGeneration", description = "Store key only if it already exists and its generation matches the value provided") - private Long expectedGeneration; + private long expectedGeneration; @Override protected void execute(OzoneClient client, OzoneAddress address) @@ -131,14 +131,9 @@ private void async( private OzoneOutputStream createOrReplaceKey(OzoneBucket bucket, String keyName, long size, Map keyMetadata, ReplicationConfig replicationConfig ) throws IOException { - if (expectedGeneration != null) { - final long existingGeneration = expectedGeneration; - Preconditions.checkArgument(existingGeneration > 0, - "expectedGeneration must be positive, but was %s", existingGeneration); - return bucket.rewriteKey(keyName, size, existingGeneration, replicationConfig, keyMetadata); - } - - return bucket.createKey(keyName, size, replicationConfig, keyMetadata); + return expectedGeneration > 0 + ? 
bucket.rewriteKey(keyName, size, expectedGeneration, replicationConfig, keyMetadata) + : bucket.createKey(keyName, size, replicationConfig, keyMetadata); } private void stream( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java index 00270310737..8cc80502386 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java @@ -227,7 +227,11 @@ private void doCleanBuckets() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while (numberOfBucketsCleaned.get() != totalBucketCount && exception == null) { - Thread.sleep(100); + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw e; + } } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java deleted file mode 100644 index 129e1a6158d..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.utils; - -import java.util.Map; - -/** - * Represent class which has info of what operation and value a set of records should be filtered with. 
- */ -public class Filter { - private FilterOperator operator; - private Object value; - private Map nextLevel = null; - - public Filter() { - this.operator = null; - this.value = null; - } - - public Filter(FilterOperator operator, Object value) { - this.operator = operator; - this.value = value; - } - - public Filter(String op, Object value) { - this.operator = getFilterOperator(op); - this.value = value; - } - - public Filter(FilterOperator operator, Object value, Map next) { - this.operator = operator; - this.value = value; - this.nextLevel = next; - } - - public Filter(String op, Object value, Map next) { - this.operator = getFilterOperator(op); - this.value = value; - this.nextLevel = next; - } - - public FilterOperator getOperator() { - return operator; - } - - public void setOperator(FilterOperator operator) { - this.operator = operator; - } - - public Object getValue() { - return value; - } - - public void setValue(Object value) { - this.value = value; - } - - public Map getNextLevel() { - return nextLevel; - } - - public void setNextLevel(Map nextLevel) { - this.nextLevel = nextLevel; - } - - public FilterOperator getFilterOperator(String op) { - if (op.equalsIgnoreCase("equals")) { - return FilterOperator.EQUALS; - } else if (op.equalsIgnoreCase("max")) { - return FilterOperator.MAX; - } else if (op.equalsIgnoreCase("min")) { - return FilterOperator.MIN; - } else { - return null; - } - } - - @Override - public String toString() { - return "(" + operator + "," + value + "," + nextLevel + ")"; - } - - /** - * Operation of the filter. - */ - public enum FilterOperator { - EQUALS, - MAX, - MIN; - } -} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java index 0dc1fde57fa..8e291056330 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.checknative; import org.apache.hadoop.ozone.shell.checknative.CheckNative; -import org.apache.ozone.test.tag.Native; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; @@ -28,7 +27,6 @@ import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; @@ -61,22 +59,6 @@ public void testCheckNativeNotLoaded() throws UnsupportedEncodingException { assertThat(stdOut).contains("Native library checking:"); assertThat(stdOut).contains("hadoop: false"); assertThat(stdOut).contains("ISA-L: false"); - assertThat(stdOut).contains("rocks-tools: false"); - } - - @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) - @Test - public void testCheckNativeRocksToolsLoaded() throws UnsupportedEncodingException { - outputStream.reset(); - new CheckNative() - .run(new String[] {}); - // trims multiple spaces - String stdOut = outputStream.toString(DEFAULT_ENCODING) - .replaceAll(" +", " "); - assertThat(stdOut).contains("Native library checking:"); - assertThat(stdOut).contains("hadoop: false"); - assertThat(stdOut).contains("ISA-L: false"); - assertThat(stdOut).contains("rocks-tools: true"); } @AfterEach diff --git 
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java deleted file mode 100644 index 9c27bedcf7d..00000000000 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.shell; - -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.PrintStream; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.charset.StandardCharsets; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Tests for OzoneRatis. - */ -public class TestOzoneRatis { - private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); - private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); - private final PrintStream originalOut = System.out; - private final PrintStream originalErr = System.err; - private OzoneRatis ozoneRatis; - - @BeforeEach - public void setUp() throws UnsupportedEncodingException { - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); - ozoneRatis = new OzoneRatis(); - } - - @AfterEach - public void tearDown() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - /** - * Execute method to invoke the OzoneRatis class and capture output. 
- * - * @param args command line arguments to pass - * @return the output from OzoneRatis - */ - private String execute(String[] args) throws IOException { - ozoneRatis.execute(args); - return outContent.toString(StandardCharsets.UTF_8.name()); - } - - @Test - public void testBasicOzoneRatisCommand() throws IOException { - String[] args = {""}; - String output = execute(args); - assertTrue(output.contains("Usage: ratis sh [generic options]")); - } - - @Test - public void testLocalRaftMetaConfSubcommand(@TempDir Path tempDir) throws IOException { - // Set up temporary directory and files - Path metadataDir = tempDir.resolve("data/metadata/ratis/test-cluster/current/"); - Files.createDirectories(metadataDir); - - // Create a dummy raft-meta.conf file using protobuf - Path raftMetaConfFile = metadataDir.resolve("raft-meta.conf"); - - // Create a LogEntryProto with a dummy index and peer - RaftProtos.RaftPeerProto raftPeerProto = RaftProtos.RaftPeerProto.newBuilder() - .setId(ByteString.copyFromUtf8("peer1")) - .setAddress("localhost:8000") - .setStartupRole(RaftProtos.RaftPeerRole.FOLLOWER) - .build(); - - RaftProtos.LogEntryProto logEntryProto = RaftProtos.LogEntryProto.newBuilder() - .setConfigurationEntry(RaftProtos.RaftConfigurationProto.newBuilder() - .addPeers(raftPeerProto).build()) - .setIndex(0) - .build(); - - // Write the logEntryProto to the raft-meta.conf file - try (OutputStream out = Files.newOutputStream(raftMetaConfFile)) { - logEntryProto.writeTo(out); - } - - - String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080", "-path", metadataDir.toString()}; - String output = execute(args); - - assertTrue(output.contains("Index in the original file is: 0")); - assertTrue(output.contains("Generate new LogEntryProto info is:")); - - // Verify that the new raft-meta.conf is generated - Path newRaftMetaConfFile = metadataDir.resolve("new-raft-meta.conf"); - assertTrue(Files.exists(newRaftMetaConfFile), "New raft-meta.conf file should be created."); - - // Verify content of the newly generated file - try (InputStream in = Files.newInputStream(newRaftMetaConfFile)) { - RaftProtos.LogEntryProto newLogEntryProto = RaftProtos.LogEntryProto.parseFrom(in); - assertEquals(1, newLogEntryProto.getIndex()); - RaftProtos.RaftPeerProto peerProto = newLogEntryProto.getConfigurationEntry().getPeers(0); - assertEquals("peer1", peerProto.getId().toStringUtf8()); - assertEquals("localhost:8080", peerProto.getAddress()); - assertEquals(RaftProtos.RaftPeerRole.FOLLOWER, peerProto.getStartupRole()); - } - } - - @Test - public void testMissingRequiredArguments() throws IOException { - String[] args = {"local", "raftMetaConf"}; - String output = execute(args); - assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required options: peers, path")); - } - - @Test - public void testMissingPeerArgument() throws IOException { - String[] args = {"local", "raftMetaConf", "-path", "/path"}; - String output = execute(args); - assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: peers")); - } - - @Test - public void testMissingPathArgument() throws IOException { - String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080"}; - String output = execute(args); - assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: path")); - } - - @Test - public void testInvalidPeersFormat() throws IOException { - String[] args = {"local", "raftMetaConf", "-peers", "localhost8080", "-path", "/path"}; - String 
output = execute(args); - assertTrue(output.contains("Failed to parse the server address parameter \"localhost8080\".")); - } - - @Test - public void testDuplicatePeersAddress() throws IOException { - String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080,localhost:8080", "-path", "/path"}; - String output = execute(args); - assertTrue(output.contains("Found duplicated address: localhost:8080.")); - } - - @Test - public void testDuplicatePeersId() throws IOException { - String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080,peer1|localhost:8081", "-path", "/path"}; - String output = execute(args); - assertTrue(output.contains("Found duplicated ID: peer1.")); - } -} diff --git a/pom.xml b/pom.xml index a35ac576e03..b5a6323bed9 100644 --- a/pom.xml +++ b/pom.xml @@ -24,7 +24,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs pom - dev-support hadoop-hdds hadoop-ozone @@ -76,7 +75,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hdds.version} - 3.1.1 + 3.1.0 1.0.6 @@ -101,7 +100,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs false false true - 9.4.56.v20240826 + 9.4.55.v20240627 5.2.0 1.0-1 4.2.0 @@ -113,8 +112,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.8.0 1.17.0 3.2.2 - 1.27.1 - 2.11.0 + 1.27.0 + 2.10.1 1.5.6-4 1.4.0 2.16.1 @@ -156,7 +155,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.45 + 2.43 1.9.13 @@ -176,14 +175,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.0.16 - 2.24.0 + 2.23.1 3.4.4 1.2.25 1.0.1 1.9.25 1.11 - 4.7.5 + 4.7.6 0.16.0 0.10.2 @@ -232,7 +231,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.58.0 7.7.3 - 3.46.1.3 + 3.46.0.0 3.1.9.Final @@ -256,7 +255,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.4.0 3.9.0 - 3.1.3 + 3.1.2 3.1.0 3.6.0 3.4.2 @@ -265,10 +264,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.6.1 1.7.0 3.5.0 - 3.10.0 + 3.7.0 3.7.1 0.16.1 - 3.1.3 + 3.1.2 3.6.0 3.7.1 4.2.2 @@ -278,7 +277,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.0-beta-1 1.0-M1 3.4.0 - 3.20.0 + 3.12.1 3.1.0 9.3 1200 @@ -297,7 +296,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.15.0 2.5.0 1.4.0 - 3.9.12 + 3.9.8.1 5.3.39 3.11.10 @@ -308,8 +307,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 5.1.0 1.2.1 - 3.9.9 - 1.1.10.6 + 3.9.8 + 1.1.10.5 1.2.0 9.40 @@ -758,17 +757,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ratis-common ${ratis.version} - - org.apache.ratis - ratis-shell - ${ratis.version} - - - org.slf4j - * - - - io.netty @@ -1427,7 +1415,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-javadoc-plugin ${maven-javadoc-plugin.version} - none + -Xdoclint:none @@ -1720,6 +1708,25 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.maven.plugins maven-remote-resources-plugin ${maven-remote-resources-plugin.version} + + + org.apache.hadoop:hadoop-build-tools:${hadoop.version} + + + + + org.apache.hadoop + hadoop-build-tools + ${hadoop.version} + + + + + + process + + + org.apache.maven.plugins @@ -1762,7 +1769,7 @@ 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs prepare-agent - org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.*,org.apache.ozone.*,org.hadoop.ozone.* + org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.* @@ -1889,6 +1896,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + dist