getAllCaCerts();
+ /**
+ * Returns the PEM-encoded CA certificate list.
+ *
+ * If the client is initialized, returns the list of PEM-encoded CA
+ * certificates; otherwise returns null.
+ *
+ * @return list of PEM-encoded CA certificates.
+ */
+ List<String> getCAList();
+
+ /**
+ * Updates and returns the PEM-encoded CA certificate list.
+ * @return list of PEM-encoded CA certificates.
+ * @throws IOException if the CA certificate list cannot be updated.
+ */
+ List<String> updateCAList() throws IOException;
+
/**
* Verifies a digital Signature, given the signature and the certificate of
* the signer.
@@ -160,32 +176,10 @@ default void assertValidKeysAndCertificate() throws OzoneSecurityException {
}
}
- /**
- * Gets a KeyManager containing this CertificateClient's key material and trustchain.
- * During certificate rotation this KeyManager is automatically updated with the new keys/certificates.
- *
- * @return A KeyManager containing keys and the trustchain for this CertificateClient.
- * @throws CertificateException
- */
ReloadingX509KeyManager getKeyManager() throws CertificateException;
- /**
- * Gets a TrustManager containing the trusted certificates of this CertificateClient.
- * During certificate rotation this TrustManager is automatically updated with the new certificates.
- *
- * @return A TrustManager containing trusted certificates for this CertificateClient.
- * @throws CertificateException
- */
ReloadingX509TrustManager getTrustManager() throws CertificateException;
- /**
- * Creates a ClientTrustManager instance using the trusted certificates of this certificate client.
- *
- * @return The new ClientTrustManager instance.
- * @throws IOException
- */
- ClientTrustManager createClientTrustManager() throws IOException;
-
/**
* Register a receiver that will be called after the certificate renewed.
*
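The two methods added above return the CA bundle as PEM-encoded strings, which callers usually parse with the JDK CertificateFactory. A hedged sketch of such a caller follows (CaListSketch is invented for the example; the List<String> element type is an assumption based on the javadoc, not code from this patch):

```java
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;

public final class CaListSketch {
  /** Parse a list of PEM-encoded certificates into X509Certificate objects. */
  public static List<X509Certificate> toX509(List<String> pemCerts)
      throws CertificateException {
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    List<X509Certificate> result = new ArrayList<>();
    for (String pem : pemCerts) {
      // Each entry is expected to be a complete PEM block
      // (-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE-----).
      result.add((X509Certificate) factory.generateCertificate(
          new ByteArrayInputStream(pem.getBytes(StandardCharsets.UTF_8))));
    }
    return result;
  }
}
```

With a helper like this, a caller could refresh its trust anchors by invoking updateCAList() and feeding the result to toX509().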
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index 66685b4bbbd..31aaca568e4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -131,7 +131,7 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf,
* Fall back to OZONE_METADATA_DIRS if not defined.
*
* @param conf
- * @return File
+ * @return the directory that SCM should use for its metadata DB.
*/
public static File getScmDbDir(ConfigurationSource conf) {
File metadataDir = getDirectoryFromConfig(conf,
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
index 9579d4e73bf..477a291f928 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java
@@ -31,6 +31,7 @@
/**
* Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to
* observe resource object life-cycle and assert proper resource closure before they are GCed.
+ *
*
* Example usage:
*
@@ -42,18 +43,16 @@
* // report leaks, don't refer to the original object (MyResource) here.
* System.out.println("MyResource is not closed before being discarded.");
* });
- * }
- * }
- *
- *
- * {@code @Override
+ *
+ * @Override
* public void close() {
* // proper resources cleanup...
* // inform tracker that this object is closed properly.
* leakTracker.close();
* }
- * }
- *
+ * }
+ *
+ * }
*/
public class LeakDetector {
private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class);
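The "Example usage" section above shows the intended call pattern; the class below is a small, self-contained illustration of the same ReferenceQueue/WeakReference idea. It is not the actual LeakDetector implementation (SimpleLeakDetector and its methods are invented for the example).

```java
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public final class SimpleLeakDetector {
  private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
  private final Set<Tracker> pending = ConcurrentHashMap.newKeySet();

  public SimpleLeakDetector() {
    Thread reaper = new Thread(this::reap, "leak-reaper");
    reaper.setDaemon(true);
    reaper.start();
  }

  /** Handle kept by the tracked resource; call close() on proper cleanup. */
  public final class Tracker extends WeakReference<Object> {
    private final Runnable leakReporter;

    Tracker(Object resource, ReferenceQueue<Object> q, Runnable leakReporter) {
      super(resource, q);
      this.leakReporter = leakReporter;
    }

    public void close() {
      pending.remove(this); // closed properly, so stop tracking it
      clear();
    }
  }

  /** Start tracking a resource; leakReporter must not reference the resource. */
  public Tracker track(Object resource, Runnable leakReporter) {
    Tracker tracker = new Tracker(resource, queue, leakReporter);
    pending.add(tracker);
    return tracker;
  }

  private void reap() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Reference<?> ref = queue.remove(); // blocks until a tracked object is GCed
        if (pending.remove(ref)) {
          // Still tracked when collected: it was never closed, report the leak.
          ((Tracker) ref).leakReporter.run();
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
}
```

A resource would call track(this, reporter) in its constructor and tracker.close() in its own close(), matching the pattern shown in the javadoc above.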
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
index 8d6f3c32e53..dff0b015ed5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
@@ -23,9 +23,9 @@
import java.io.IOException;
/**
- * A {@link org.apache.hadoop.hdds.utils.db.Codec} to serialize/deserialize objects by delegation.
+ * A {@link Codec} to serialize/deserialize objects by delegation.
*
- * @param <T> The object type of this {@link org.apache.hadoop.hdds.utils.db.Codec}.
+ * @param <T> The object type of this {@link Codec}.
 * @param <DELEGATE> The object type of the {@link #delegate}.
*/
public class DelegatedCodec<T, DELEGATE> implements Codec<T> {
@@ -53,8 +53,8 @@ public enum CopyType {
* Construct a {@link Codec} using the given delegate.
*
* @param delegate the delegate {@link Codec}
- * @param forward a function to convert {@code DELEGATE} to {@code T}.
- * @param backward a function to convert {@code T} back to {@code DELEGATE}.
+ * @param forward a function to convert {@link DELEGATE} to {@link T}.
+ * @param backward a function to convert {@link T} back to {@link DELEGATE}.
* @param copyType How to {@link #copyObject(Object)}?
*/
public DelegatedCodec(Codec<DELEGATE> delegate,
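The javadoc above describes serializing T by first converting it to DELEGATE and handing the work to the delegate codec. Below is a simplified, self-contained sketch of that delegation pattern; SimpleCodec and CheckedFunction are stand-ins invented for the example, not the real org.apache.hadoop.hdds.utils.db types.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public final class DelegationSketch {

  /** Stand-in for the real Codec interface. */
  interface SimpleCodec<T> {
    byte[] toPersistedFormat(T value) throws IOException;
    T fromPersistedFormat(byte[] bytes) throws IOException;
  }

  /** A conversion that may fail with an IOException. */
  interface CheckedFunction<A, B> {
    B apply(A input) throws IOException;
  }

  /** Serialize T by converting it to DELEGATE and delegating. */
  static final class SimpleDelegatedCodec<T, DELEGATE> implements SimpleCodec<T> {
    private final SimpleCodec<DELEGATE> delegate;
    private final CheckedFunction<DELEGATE, T> forward;
    private final CheckedFunction<T, DELEGATE> backward;

    SimpleDelegatedCodec(SimpleCodec<DELEGATE> delegate,
        CheckedFunction<DELEGATE, T> forward,
        CheckedFunction<T, DELEGATE> backward) {
      this.delegate = delegate;
      this.forward = forward;
      this.backward = backward;
    }

    @Override
    public byte[] toPersistedFormat(T value) throws IOException {
      return delegate.toPersistedFormat(backward.apply(value));
    }

    @Override
    public T fromPersistedFormat(byte[] bytes) throws IOException {
      return forward.apply(delegate.fromPersistedFormat(bytes));
    }
  }

  public static void main(String[] args) throws IOException {
    // A String codec, with an Integer codec layered on top by delegation.
    SimpleCodec<String> stringCodec = new SimpleCodec<String>() {
      @Override
      public byte[] toPersistedFormat(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
      }
      @Override
      public String fromPersistedFormat(byte[] b) {
        return new String(b, StandardCharsets.UTF_8);
      }
    };
    SimpleCodec<Integer> intCodec = new SimpleDelegatedCodec<>(
        stringCodec, Integer::valueOf, String::valueOf);
    byte[] bytes = intCodec.toPersistedFormat(42);
    System.out.println(intCodec.fromPersistedFormat(bytes)); // prints 42
  }
}
```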
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index df0fdc59a4a..c61502ff4a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -120,14 +120,6 @@ public final class OzoneConfigKeys {
public static final String OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT
= "4MB";
- /**
- * Flag to allow server-side HBase-related features and enhancements to be enabled.
- */
- public static final String OZONE_HBASE_ENHANCEMENTS_ALLOWED
- = "ozone.hbase.enhancements.allowed";
- public static final boolean OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT
- = false;
-
/**
* Flag to enable hsync/hflush.
*/
@@ -543,6 +535,10 @@ public final class OzoneConfigKeys {
public static final int OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT = 512;
+ public static final String OZONE_CLIENT_LIST_TRASH_KEYS_MAX =
+ "ozone.client.list.trash.keys.max";
+ public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000;
+
public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir";
public static final String OZONE_HTTP_POLICY_KEY =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 101507b502e..b34a5d8387b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -40,6 +40,7 @@ public final class OzoneConsts {
public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId";
public static final String PRIMARY_SCM_NODE_ID = "primaryScmNodeId";
+ public static final String OZONE_SIMPLE_ROOT_USER = "root";
public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
public static final String STORAGE_ID = "storageID";
@@ -75,6 +76,12 @@ public final class OzoneConsts {
"EEE, dd MMM yyyy HH:mm:ss zzz";
public static final String OZONE_TIME_ZONE = "GMT";
+ public static final String OZONE_COMPONENT = "component";
+ public static final String OZONE_FUNCTION = "function";
+ public static final String OZONE_RESOURCE = "resource";
+ public static final String OZONE_USER = "user";
+ public static final String OZONE_REQUEST = "request";
+
// OM Http server endpoints
public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT =
"/serviceList";
@@ -94,9 +101,14 @@ public final class OzoneConsts {
public static final String CONTAINER_EXTENSION = ".container";
+ public static final String CONTAINER_META = ".meta";
+
+ // Refer to {@link ContainerReader} for container storage layout on disk.
+ public static final String CONTAINER_PREFIX = "containers";
public static final String CONTAINER_META_PATH = "metadata";
public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
+ public static final String CONTAINER_ROOT_PREFIX = "repository";
public static final String FILE_HASH = "SHA-256";
public static final String MD5_HASH = "MD5";
@@ -116,6 +128,7 @@ public final class OzoneConsts {
* level DB names used by SCM and data nodes.
*/
public static final String CONTAINER_DB_SUFFIX = "container.db";
+ public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
public static final String OM_DB_NAME = "om.db";
public static final String SCM_DB_NAME = "scm.db";
@@ -174,8 +187,10 @@ public final class OzoneConsts {
public static final String OM_USER_PREFIX = "$";
public static final String OM_S3_PREFIX = "S3:";
public static final String OM_S3_CALLER_CONTEXT_PREFIX = "S3Auth:S3G|";
+ public static final String OM_S3_VOLUME_PREFIX = "s3";
public static final String OM_S3_SECRET = "S3Secret:";
public static final String OM_PREFIX = "Prefix:";
+ public static final String OM_TENANT = "Tenant:";
/**
* Max chunk size limit.
@@ -183,6 +198,11 @@ public final class OzoneConsts {
public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
+ /**
+ * Max OM Quota size of Long.MAX_VALUE.
+ */
+ public static final long MAX_QUOTA_IN_BYTES = Long.MAX_VALUE;
+
/**
* Quota RESET default is -1, which means quota is not set.
*/
@@ -194,20 +214,36 @@ public final class OzoneConsts {
*/
public enum Units { TB, GB, MB, KB, B }
+ /**
+ * Max number of keys returned per list buckets operation.
+ */
+ public static final int MAX_LISTBUCKETS_SIZE = 1024;
+
+ /**
+ * Max number of keys returned per list keys operation.
+ */
+ public static final int MAX_LISTKEYS_SIZE = 1024;
+
+ /**
+ * Max number of volumes returned per list volumes operation.
+ */
+ public static final int MAX_LISTVOLUMES_SIZE = 1024;
+
+ public static final int INVALID_PORT = -1;
+
/**
* Object ID to identify reclaimable uncommitted blocks.
*/
public static final long OBJECT_ID_RECLAIM_BLOCKS = 0L;
+
/**
* Default SCM Datanode ID file name.
*/
public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id";
- /**
- * The ServiceListJSONServlet context attribute where OzoneManager
- * instance gets stored.
- */
+ // The ServiceListJSONServlet context attribute where OzoneManager
+ // instance gets stored.
public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm";
@@ -272,8 +308,12 @@ private OzoneConsts() {
public static final String KEY_PREFIX = "keyPrefix";
public static final String ACL = "acl";
public static final String ACLS = "acls";
+ public static final String USER_ACL = "userAcl";
+ public static final String ADD_ACLS = "addAcls";
+ public static final String REMOVE_ACLS = "removeAcls";
public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
public static final String HAS_SNAPSHOT = "hasSnapshot";
+ public static final String TO_KEY_NAME = "toKeyName";
public static final String STORAGE_TYPE = "storageType";
public static final String RESOURCE_TYPE = "resourceType";
public static final String IS_VERSION_ENABLED = "isVersionEnabled";
@@ -283,6 +323,7 @@ private OzoneConsts() {
public static final String REPLICATION_TYPE = "replicationType";
public static final String REPLICATION_FACTOR = "replicationFactor";
public static final String REPLICATION_CONFIG = "replicationConfig";
+ public static final String KEY_LOCATION_INFO = "keyLocationInfo";
public static final String MULTIPART_LIST = "multipartList";
public static final String UPLOAD_ID = "uploadID";
public static final String PART_NUMBER_MARKER = "partNumberMarker";
@@ -337,6 +378,10 @@ private OzoneConsts() {
public static final String JAVA_TMP_DIR = "java.io.tmpdir";
public static final String LOCALHOST = "localhost";
+
+ public static final int S3_BUCKET_MIN_LENGTH = 3;
+ public static final int S3_BUCKET_MAX_LENGTH = 64;
+
public static final int S3_SECRET_KEY_MIN_LENGTH = 8;
public static final int S3_REQUEST_HEADER_METADATA_SIZE_LIMIT_KB = 2;
@@ -353,6 +398,7 @@ private OzoneConsts() {
public static final String GDPR_ALGORITHM_NAME = "AES";
public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16;
public static final Charset GDPR_CHARSET = StandardCharsets.UTF_8;
+ public static final String GDPR_LENGTH = "length";
public static final String GDPR_SECRET = "secret";
public static final String GDPR_ALGORITHM = "algorithm";
@@ -363,7 +409,7 @@ private OzoneConsts() {
* contains illegal characters when creating/renaming key.
*
* Avoid the following characters in a key name:
- * {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]"}, Quotation
+ * "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation
* marks and Non-printable ASCII characters (128–255 decimal characters).
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
*/
@@ -380,6 +426,13 @@ private OzoneConsts() {
public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB";
+ // SCM HA
+ public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault";
+
+ // SCM Ratis snapshot file to store the last applied index
+ public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex";
+
+ public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm";
// An on-disk transient marker file used when replacing DB with checkpoint
public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
index 982b559c7a5..eec2ceeb5e8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
@@ -44,8 +44,6 @@ public enum OzoneManagerVersion implements ComponentVersion {
ATOMIC_REWRITE_KEY(6, "OzoneManager version that supports rewriting key as atomic operation"),
HBASE_SUPPORT(7, "OzoneManager version that supports HBase integration"),
- LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight"
- + " listStatus API."),
FUTURE_VERSION(-1, "Used internally in the client when the server side is "
+ " newer and an unknown server version has arrived to the client.");
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
index a5235978327..1d596bf7007 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
@@ -44,14 +44,12 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer {
static {
Field f = null;
- if (JavaUtils.isJavaVersionAtMost(8)) {
- try {
- f = ByteBuffer.class
- .getDeclaredField("isReadOnly");
- f.setAccessible(true);
- } catch (NoSuchFieldException e) {
- LOG.error("No isReadOnly field in ByteBuffer", e);
- }
+ try {
+ f = ByteBuffer.class
+ .getDeclaredField("isReadOnly");
+ f.setAccessible(true);
+ } catch (NoSuchFieldException e) {
+ LOG.error("No isReadOnly field in ByteBuffer", e);
}
IS_READY_ONLY_FIELD = f;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
index a24d39e5dac..058934c2f27 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java
@@ -39,12 +39,13 @@ static ChunkBuffer allocate(int capacity) {
return allocate(capacity, 0);
}
- /** Similar to {@link ByteBuffer#allocate(int)}
+ /**
+ * Similar to {@link ByteBuffer#allocate(int)}
* except that it can specify the increment.
*
* @param increment
* the increment size so that this buffer is allocated incrementally.
- * When increment {@literal <= 0}, entire buffer is allocated in the beginning.
+ * When increment <= 0, entire buffer is allocated in the beginning.
*/
static ChunkBuffer allocate(int capacity, int increment) {
if (increment > 0 && increment < capacity) {
@@ -59,8 +60,7 @@ static ChunkBuffer wrap(ByteBuffer buffer) {
return new ChunkBufferImplWithByteBuffer(buffer);
}
- /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer},
- * with a function called when buffers are released.*/
+ /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer}. */
static ChunkBuffer wrap(List<ByteBuffer> buffers) {
Objects.requireNonNull(buffers, "buffers == null");
if (buffers.size() == 1) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index ea5c5453f3f..4bd170df8e8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -23,7 +23,6 @@
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.Proto3Codec;
-import org.apache.hadoop.ozone.OzoneConsts;
import java.io.IOException;
import java.util.Collections;
@@ -281,14 +280,4 @@ public void appendTo(StringBuilder sb) {
sb.append(", size=").append(size);
sb.append("]");
}
-
- public long getBlockGroupLength() {
- String lenStr = getMetadata()
- .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK);
- // If we don't have the length, then it indicates a problem with the stripe.
- // All replica should carry the length, so if it is not there, we return 0,
- // which will cause us to set the length of the block to zero and not
- // attempt to reconstruct it.
- return (lenStr == null) ? 0 : Long.parseLong(lenStr);
- }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
index 832ab40d30f..fdf40af9e09 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
@@ -27,7 +27,7 @@
/**
* Helper class to convert between protobuf lists and Java lists of
- * {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo} objects.
+ * {@link ContainerProtos.ChunkInfo} objects.
*
* This class is immutable.
*/
public ChunkInfoList(List<ContainerProtos.ChunkInfo> chunks) {
}
/**
- * @return A new {@link #ChunkInfoList} created from protobuf data.
+ * @return A new {@link ChunkInfoList} created from protobuf data.
*/
public static ChunkInfoList getFromProtoBuf(
ContainerProtos.ChunkInfoList chunksProto) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
index b94dd024b2d..83e63a2a322 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
@@ -37,16 +37,18 @@
/**
* Generic factory which stores different instances of Type 'T' sharded by
- * a key and version. A single key can be associated with different versions
+ * a key & version. A single key can be associated with different versions
* of 'T'.
+ *
* Why does this class exist?
* A typical use case during upgrade is to have multiple versions of a class
* / method / object and chose them based on current layout
* version at runtime. Before finalizing, an older version is typically
* needed, and after finalize, a newer version is needed. This class serves
* this purpose in a generic way.
+ *
* For example, we can create a Factory to create multiple versions of
- * OMRequests sharded by Request Type and Layout Version Supported.
+ * OMRequests sharded by Request Type & Layout Version Supported.
*/
public class LayoutVersionInstanceFactory<T> {
@@ -69,7 +71,7 @@ public class LayoutVersionInstanceFactory {
/**
* Register an instance with a given factory key (key + version).
* For safety reasons we dont allow (1) re-registering, (2) registering an
- * instance with version > SLV.
+ * instance with version > SLV.
*
* @param lvm LayoutVersionManager
* @param key VersionFactoryKey key to associate with instance.
@@ -136,15 +138,13 @@ private boolean isValid(LayoutVersionManager lvm, int version) {
}
/**
- *
* From the list of versioned instances for a given "key", this
* returns the "floor" value corresponding to the given version.
- * For example, if we have key = "CreateKey", entry -> [(1, CreateKeyV1),
- * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
+ * For example, if we have key = "CreateKey", entry -> [(1, CreateKeyV1),
+ * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
* return CreateKeyV1.
* Since this is a priority queue based implementation, we use a O(1) peek()
* lookup to get the current valid version.
- *
* @param lvm LayoutVersionManager
* @param key Key and Version.
* @return instance.
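A rough illustration of the "floor" lookup described above, using a TreeMap per key rather than the priority queue the real factory uses (class and method names are invented for the example):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public final class FloorVersionLookup<T> {
  // key -> (layout version -> instance registered for that version)
  private final Map<String, TreeMap<Integer, T>> instances = new HashMap<>();

  public void register(String key, int version, T instance) {
    instances.computeIfAbsent(key, k -> new TreeMap<>()).put(version, instance);
  }

  /** Returns the instance registered with the highest version <= the requested one. */
  public T get(String key, int version) {
    TreeMap<Integer, T> versions = instances.get(key);
    if (versions == null) {
      return null;
    }
    Map.Entry<Integer, T> floor = versions.floorEntry(version);
    return floor == null ? null : floor.getValue();
  }

  public static void main(String[] args) {
    FloorVersionLookup<String> factory = new FloorVersionLookup<>();
    factory.register("CreateKey", 1, "CreateKeyV1");
    factory.register("CreateKey", 3, "CreateKeyV2");
    // Requesting version 2 falls back to the floor entry, CreateKeyV1.
    System.out.println(factory.get("CreateKey", 2));
  }
}
```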
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java
index a765c2c9455..3137d756e6b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java
@@ -74,6 +74,7 @@ public interface LayoutVersionManager {
/**
* Generic API for returning a registered handler for a given type.
* @param type String type
+ * @return the handler registered for the given type, or null if none is registered.
*/
default Object getHandler(String type) {
return null;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java
index 19c0498aa7a..44ae94870e3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java
@@ -50,14 +50,14 @@ public interface UpgradeFinalizer {
* Represents the current state in which the service is with regards to
* finalization after an upgrade.
* The state transitions are the following:
- * {@code ALREADY_FINALIZED} - no entry no exit from this status without restart.
+ * ALREADY_FINALIZED - no entry no exit from this status without restart.
* After an upgrade:
- * {@code FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION
- * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE} from finalization done
+ * FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION
+ * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE from finalization done
* there is no more move possible, after a restart the service can end up in:
- * {@code FINALIZATION_REQUIRED}, if the finalization failed and have not reached
- * {@code FINALIZATION_DONE},
- * - or it can be {@code ALREADY_FINALIZED} if the finalization was successfully done.
+ * - FINALIZATION_REQUIRED, if the finalization failed and have not reached
+ * FINALIZATION_DONE,
+ * - or it can be ALREADY_FINALIZED if the finalization was successfully done.
*/
enum Status {
ALREADY_FINALIZED,
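The transition rules above can be read as a small state table. The sketch below encodes that reading; the table is derived from the javadoc text, not taken from the actual UpgradeFinalizer implementation.

```java
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;

public final class FinalizationTransitions {
  enum Status {
    ALREADY_FINALIZED, FINALIZATION_REQUIRED, STARTING_FINALIZATION,
    FINALIZATION_IN_PROGRESS, FINALIZATION_DONE
  }

  // Allowed moves as described in the javadoc above.
  private static final Map<Status, Set<Status>> NEXT = new EnumMap<>(Status.class);
  static {
    NEXT.put(Status.ALREADY_FINALIZED, EnumSet.noneOf(Status.class));
    NEXT.put(Status.FINALIZATION_REQUIRED, EnumSet.of(Status.STARTING_FINALIZATION));
    NEXT.put(Status.STARTING_FINALIZATION, EnumSet.of(Status.FINALIZATION_IN_PROGRESS));
    NEXT.put(Status.FINALIZATION_IN_PROGRESS, EnumSet.of(Status.FINALIZATION_DONE));
    NEXT.put(Status.FINALIZATION_DONE, EnumSet.noneOf(Status.class));
  }

  static boolean canMove(Status from, Status to) {
    return NEXT.get(from).contains(to);
  }

  public static void main(String[] args) {
    System.out.println(canMove(Status.FINALIZATION_REQUIRED,
        Status.STARTING_FINALIZATION)); // true
    System.out.println(canMove(Status.ALREADY_FINALIZED,
        Status.FINALIZATION_REQUIRED)); // false, only a restart changes this state
  }
}
```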
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java
index 6465cc85501..bda45f5a745 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java
@@ -20,7 +20,7 @@
/**
* "Key" element to the Version specific instance factory. Currently it has 2
- * dimensions -> a 'key' string and a version. This is to support a factory
+ * dimensions -> a 'key' string and a version. This is to support a factory
* which returns an instance for a given "key" and "version".
*/
public class VersionFactoryKey {
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9b0ff0e9625..20c1bed89be 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -860,15 +860,6 @@
The default read threshold to use memory mapped buffers.
-  <property>
-    <name>ozone.chunk.read.mapped.buffer.max.count</name>
-    <value>0</value>
-    <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
-    <description>
-      The default max count of memory mapped buffers allowed for a DN.
-      Default 0 means no mapped buffers allowed for data read.
-    </description>
-  </property>
    <name>ozone.scm.container.layout</name>
    <value>FILE_PER_BLOCK</value>
@@ -3415,6 +3406,14 @@
unhealthy will each have their own limit.
+  <property>
+    <name>ozone.client.list.trash.keys.max</name>
+    <value>1000</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>
+      The maximum number of keys to return for a list trash request.
+    </description>
+  </property>
    <name>ozone.http.basedir</name>
@@ -3743,15 +3742,6 @@
-  <property>
-    <name>ozone.snapshot.deep.cleaning.enabled</name>
-    <value>false</value>
-    <tag>OZONE, PERFORMANCE, OM</tag>
-    <description>
-      Flag to enable/disable snapshot deep cleaning.
-    </description>
-  </property>
-
    <name>ozone.scm.event.ContainerReport.thread.pool.size</name>
    <value>10</value>
@@ -4234,27 +4224,12 @@
-  <property>
-    <name>ozone.hbase.enhancements.allowed</name>
-    <value>false</value>
-    <tag>OZONE, OM</tag>
-    <description>
-      When set to false, server-side HBase enhancement-related Ozone (experimental) features
-      are disabled (not allowed to be enabled) regardless of whether those configs are set.
-
-      Here is the list of configs and values overridden when this config is set to false:
-      1. ozone.fs.hsync.enabled = false
-
-      A warning message will be printed if any of the above configs are overridden by this.
-    </description>
-  </property>
    <name>ozone.fs.hsync.enabled</name>
    <value>false</value>
-    <tag>OZONE, CLIENT, OM</tag>
+    <tag>OZONE, CLIENT</tag>
-      Enable hsync/hflush on the Ozone Manager and/or client side. Disabled by default.
-      Can be enabled only when ozone.hbase.enhancements.allowed = true
+      Enable hsync/hflush. By default they are disabled.
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
index 0d6c0c90878..b1a20c9aecb 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
@@ -108,7 +108,7 @@ default String[] getTrimmedStrings(String name) {
/**
* Gets the configuration entries where the key contains the prefix. This
* method will strip the prefix from the key in the return Map.
- * Example: {@code somePrefix.key->value} will be {@code key->value} in the returned map.
+ * Example: somePrefix.key->value will be key->value in the returned map.
* @param keyPrefix Prefix to search.
* @return Map containing keys that match and their values.
*/
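A minimal stand-alone illustration of the prefix-stripping behaviour described in that javadoc, using a plain Map rather than the ConfigurationSource API (names are illustrative):

```java
import java.util.HashMap;
import java.util.Map;

public final class PrefixStripSketch {
  static Map<String, String> propsWithPrefix(Map<String, String> all, String prefix) {
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<String, String> e : all.entrySet()) {
      if (e.getKey().startsWith(prefix)) {
        // Strip the prefix from the key in the returned map.
        result.put(e.getKey().substring(prefix.length()), e.getValue());
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("somePrefix.key", "value");
    conf.put("unrelated.key", "x");
    // prints {key=value}
    System.out.println(propsWithPrefix(conf, "somePrefix."));
  }
}
```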
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java
index 3c08e58f9bf..969add4a15c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java
@@ -33,7 +33,9 @@
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
import jakarta.annotation.Nonnull;
import org.apache.hadoop.ozone.container.common.helpers.TokenHelper;
@@ -71,7 +73,11 @@ private static XceiverClientManager createClientManager(
throws IOException {
ClientTrustManager trustManager = null;
if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
- trustManager = certificateClient.createClientTrustManager();
+ CACertificateProvider localCaCerts =
+ () -> HAUtils.buildCAX509List(certificateClient, conf);
+ CACertificateProvider remoteCacerts =
+ () -> HAUtils.buildCAX509List(null, conf);
+ trustManager = new ClientTrustManager(remoteCacerts, localCaCerts);
}
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
return new XceiverClientManager(conf,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java
index d14dc666b71..ac42efd45ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java
@@ -46,16 +46,6 @@ public ReconcileContainerTask(ContainerController controller,
this.dnClient = dnClient;
}
- @Override
- protected String getMetricName() {
- return "ContainerReconciliations";
- }
-
- @Override
- protected String getMetricDescriptionSegment() {
- return "Container Reconciliations";
- }
-
@Override
public void runTask() {
long start = Time.monotonicNow();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 15cc6245ddb..b5dfd07d576 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -189,6 +189,7 @@ public int containerCount() {
* Send FCR which will not contain removed containers.
*
* @param context StateContext
+ *
*/
public void handleVolumeFailures(StateContext context) {
AtomicBoolean failedVolume = new AtomicBoolean(false);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 28aa3d8588f..417fb443eef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -649,7 +649,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) {
@Override
public void setClusterId(String clusterId) {
- Preconditions.checkNotNull(clusterId, "clusterId cannot be null");
+ Preconditions.checkNotNull(clusterId, "clusterId Cannot be null");
if (this.clusterId == null) {
this.clusterId = clusterId;
for (Map.Entry handlerMap : handlers.entrySet()) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 2e11cde3d9e..d6ca2d120e6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -35,7 +35,7 @@
/**
* Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}).
* The outer container map does not entail locking for a better performance.
- * The inner {@code BlockDataMap} is synchronized.
+ * The inner {@link BlockDataMap} is synchronized.
*
* This class will maintain list of open keys per container when closeContainer
* command comes, it should autocommit all open keys of a open container before
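A rough sketch of the two-level map layout that comment describes: a lock-free concurrent outer map keyed by containerId, with a synchronized inner map per container. Types and names here are simplified stand-ins, not the real BlockDataMap.

```java
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class OpenBlockMapSketch {
  // containerId -> (localId -> block metadata); outer map needs no explicit locking.
  private final Map<Long, Map<Long, String>> containers = new ConcurrentHashMap<>();

  void addBlock(long containerId, long localId, String blockData) {
    containers
        .computeIfAbsent(containerId,
            id -> Collections.synchronizedMap(new LinkedHashMap<>()))
        .put(localId, blockData);
  }

  Map<Long, String> removeContainer(long containerId) {
    return containers.remove(containerId);
  }

  public static void main(String[] args) {
    OpenBlockMapSketch map = new OpenBlockMapSketch();
    map.addBlock(1L, 100L, "chunk list for block 100");
    System.out.println(map.removeContainer(1L));
  }
}
```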
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
index bb47b5b9b6f..c584ba79037 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
@@ -90,7 +90,7 @@ public final List chooseContainerForBlockDeletion(
/**
* Abstract step for ordering the container data to be deleted.
* Subclass need to implement the concrete ordering implementation
- * in descending order (more prioritized -> less prioritized)
+ * in descending order (more prioritized -> less prioritized)
* @param candidateContainers candidate containers to be ordered
*/
protected abstract void orderByDescendingPriority(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index f075b6f67ca..d02bae0a35a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -75,6 +75,7 @@ void validateContainerCommand(
/**
* Returns the handler for the specified containerType.
* @param containerType
+ * @return the Handler registered for the given containerType.
*/
Handler getHandler(ContainerProtos.ContainerType containerType);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 9d157cc9912..55fcbcdb3cc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -218,6 +218,7 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService,
ReplicationSupervisorMetrics.create(supervisor);
ecReconstructionMetrics = ECReconstructionMetrics.create();
+
ecReconstructionCoordinator = new ECReconstructionCoordinator(
conf, certClient, secretKeyClient, context, ecReconstructionMetrics,
threadNamePrefix);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index bc703ac6a55..8533f7384d4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -18,6 +18,7 @@
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -57,11 +58,11 @@ public class CloseContainerCommandHandler implements CommandHandler {
private final AtomicLong invocationCount = new AtomicLong(0);
private final AtomicInteger queuedCount = new AtomicInteger(0);
- private final ThreadPoolExecutor executor;
+ private final ExecutorService executor;
private long totalTime;
/**
- * Constructs a close container command handler.
+ * Constructs a ContainerReport handler.
*/
public CloseContainerCommandHandler(
int threadPoolSize, int queueSize, String threadNamePrefix) {
@@ -219,14 +220,4 @@ public long getTotalRunTime() {
public int getQueuedCount() {
return queuedCount.get();
}
-
- @Override
- public int getThreadPoolMaxPoolSize() {
- return executor.getMaximumPoolSize();
- }
-
- @Override
- public int getThreadPoolActivePoolSize() {
- return executor.getActiveCount();
- }
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index c3f8da74c7a..9035b79c670 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -56,6 +56,11 @@ public final class CommandDispatcher {
private CommandDispatcher(OzoneContainer container, SCMConnectionManager
connectionManager, StateContext context,
CommandHandler... handlers) {
+ Preconditions.checkNotNull(context);
+ Preconditions.checkNotNull(handlers);
+ Preconditions.checkArgument(handlers.length > 0);
+ Preconditions.checkNotNull(container);
+ Preconditions.checkNotNull(connectionManager);
this.context = context;
this.container = container;
this.connectionManager = connectionManager;
@@ -72,7 +77,6 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager
commandHandlerMetrics = CommandHandlerMetrics.create(handlerMap);
}
- @VisibleForTesting
public CommandHandler getCloseContainerHandler() {
return handlerMap.get(Type.closeContainerCommand);
}
@@ -197,12 +201,11 @@ public Builder setContext(StateContext stateContext) {
* @return Command Dispatcher.
*/
public CommandDispatcher build() {
- Preconditions.checkNotNull(this.connectionManager,
- "Missing scm connection manager.");
- Preconditions.checkNotNull(this.container, "Missing ozone container.");
- Preconditions.checkNotNull(this.context, "Missing state context.");
- Preconditions.checkArgument(this.handlerList.size() > 0,
- "The number of command handlers must be greater than 0.");
+ Preconditions.checkNotNull(this.connectionManager, "Missing connection" +
+ " manager.");
+ Preconditions.checkNotNull(this.container, "Missing container.");
+ Preconditions.checkNotNull(this.context, "Missing context.");
+ Preconditions.checkArgument(this.handlerList.size() > 0);
return new CommandDispatcher(this.container, this.connectionManager,
this.context, handlerList.toArray(
new CommandHandler[handlerList.size()]));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index bd7431c6145..747749066e3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -168,12 +168,12 @@ public int getQueuedCount() {
@Override
public int getThreadPoolMaxPoolSize() {
- return executor.getMaximumPoolSize();
+ return ((ThreadPoolExecutor)executor).getMaximumPoolSize();
}
@Override
public int getThreadPoolActivePoolSize() {
- return executor.getActiveCount();
+ return ((ThreadPoolExecutor)executor).getActiveCount();
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
index b76e306e1c0..ead81c32e5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
@@ -36,6 +36,7 @@
import java.io.IOException;
import java.time.Clock;
import java.util.OptionalLong;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@@ -52,7 +53,7 @@ public class DeleteContainerCommandHandler implements CommandHandler {
private final AtomicInteger invocationCount = new AtomicInteger(0);
private final AtomicInteger timeoutCount = new AtomicInteger(0);
private final AtomicLong totalTime = new AtomicLong(0);
- private final ThreadPoolExecutor executor;
+ private final ExecutorService executor;
private final Clock clock;
private int maxQueueSize;
@@ -69,7 +70,7 @@ public DeleteContainerCommandHandler(
}
protected DeleteContainerCommandHandler(Clock clock,
- ThreadPoolExecutor executor, int queueSize) {
+ ExecutorService executor, int queueSize) {
this.executor = executor;
this.clock = clock;
maxQueueSize = queueSize;
@@ -130,7 +131,7 @@ private void handleInternal(SCMCommand command, StateContext context,
@Override
public int getQueuedCount() {
- return executor.getQueue().size();
+ return ((ThreadPoolExecutor)executor).getQueue().size();
}
@Override
@@ -159,16 +160,6 @@ public long getTotalRunTime() {
return totalTime.get();
}
- @Override
- public int getThreadPoolMaxPoolSize() {
- return executor.getMaximumPoolSize();
- }
-
- @Override
- public int getThreadPoolActivePoolSize() {
- return executor.getActiveCount();
- }
-
@Override
public void stop() {
try {
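In the handler changes above, the field stays declared as ExecutorService while the metrics getters cast back to ThreadPoolExecutor, because ExecutorService itself exposes no queue or pool-size accessors. A stand-alone sketch of that pattern (class and method names are made up):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

public final class PoolMetricsSketch {
  private final ExecutorService executor = Executors.newFixedThreadPool(4);

  int queuedCount() {
    // ExecutorService has no getQueue(), so the concrete type is needed here.
    return ((ThreadPoolExecutor) executor).getQueue().size();
  }

  int maxPoolSize() {
    return ((ThreadPoolExecutor) executor).getMaximumPoolSize();
  }

  public static void main(String[] args) {
    PoolMetricsSketch sketch = new PoolMetricsSketch();
    System.out.println(sketch.queuedCount() + " queued, max pool "
        + sketch.maxPoolSize());
    sketch.executor.shutdown();
  }
}
```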
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index caa6b9df121..b6ab4748fe3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -495,7 +495,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) {
/**
* Sets the LayoutVersionManager.
*
- * @param lvm config
+ * @param versionMgr - config
* @return Builder
*/
public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 969756b40f8..71f95cc4d32 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -244,7 +244,7 @@ public Builder setConfig(ConfigurationSource config) {
/**
* Sets the LayoutVersionManager.
*
- * @param lvm config
+ * @param versionMgr - config
* @return Builder.
*/
public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index be566f84fc9..b3398de07ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -83,7 +83,6 @@
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftGroupMemberId;
import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.exceptions.StateMachineException;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.server.protocol.TermIndex;
@@ -234,7 +233,7 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI
// cache with FIFO eviction, and if element not found, this needs
// to be obtained from disk for slow follower
stateMachineDataCache = new ResourceCache<>(
- (index, data) -> data.size(),
+ (index, data) -> ((ByteString)data).size(),
pendingRequestsBytesLimit,
(p) -> {
if (p.wasEvicted()) {
@@ -705,10 +704,9 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) {
return chunkExecutors.get(i);
}
- /**
- * {@link #writeStateMachineData}
- * calls are not synchronized with each other
- * and also with {@code applyTransaction(TransactionContext)}.
+ /*
+ * writeStateMachineData calls are not synchronized with each other
+ * and also with applyTransaction.
*/
@Override
public CompletableFuture write(LogEntryProto entry, TransactionContext trx) {
@@ -826,7 +824,7 @@ public CompletableFuture flush(long index) {
}
/**
- * This method is used by the Leader to read state machine data for sending appendEntries to followers.
+ * This method is used by the Leader to read state machine date for sending appendEntries to followers.
* It will first get the data from {@link #stateMachineDataCache}.
* If the data is not in the cache, it will read from the file by dispatching a command
*
@@ -1163,8 +1161,8 @@ public void evictStateMachineCache() {
}
@Override
- public void notifyFollowerSlowness(RoleInfoProto roleInfoProto, RaftPeer follower) {
- ratisServer.handleFollowerSlowness(gid, roleInfoProto, follower);
+ public void notifyFollowerSlowness(RoleInfoProto roleInfoProto) {
+ ratisServer.handleNodeSlowness(gid, roleInfoProto);
}
@Override
@@ -1199,7 +1197,7 @@ public void notifyGroupRemove() {
try {
containerController.markContainerForClose(cid);
containerController.quasiCloseContainer(cid,
- "Ratis group removed. Group id: " + gid);
+ "Ratis group removed");
} catch (IOException e) {
LOG.debug("Failed to quasi-close container {}", cid);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index a4c14343985..7899cdcc0e6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -104,7 +104,6 @@
import org.apache.ratis.server.RaftServerRpc;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.ratis.server.storage.RaftStorage;
-import org.apache.ratis.util.Preconditions;
import org.apache.ratis.util.SizeInBytes;
import org.apache.ratis.util.TimeDuration;
import org.apache.ratis.util.TraditionalBinaryPrefix;
@@ -162,18 +161,19 @@ private static long nextCallId() {
private int clientPort;
private int dataStreamPort;
private final RaftServer server;
- private final String name;
private final List<ThreadPoolExecutor> chunkExecutors;
private final ContainerDispatcher dispatcher;
private final ContainerController containerController;
private final ClientId clientId = ClientId.randomId();
private final StateContext context;
+ private final long nodeFailureTimeoutMs;
private boolean isStarted = false;
private final DatanodeDetails datanodeDetails;
private final ConfigurationSource conf;
// TODO: Remove the gids set when Ratis supports an api to query active
// pipelines
private final ConcurrentMap<RaftGroupId, ActivePipelineContext> activePipelines = new ConcurrentHashMap<>();
+ private final RaftPeerId raftPeerId;
// Timeout used while calling submitRequest directly.
private final long requestTimeout;
private final boolean shouldDeleteRatisLogDirectory;
@@ -197,14 +197,14 @@ private XceiverServerRatis(HddsDatanodeService hddsDatanodeService, DatanodeDeta
this.context = context;
this.dispatcher = dispatcher;
this.containerController = containerController;
+ this.raftPeerId = RatisHelper.toRaftPeerId(dd);
String threadNamePrefix = datanodeDetails.threadNamePrefix();
chunkExecutors = createChunkExecutors(conf, threadNamePrefix);
+ nodeFailureTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout();
shouldDeleteRatisLogDirectory =
ratisServerConfig.shouldDeleteRatisLogDirectory();
RaftProperties serverProperties = newRaftProperties();
- final RaftPeerId raftPeerId = RatisHelper.toRaftPeerId(dd);
- this.name = getClass().getSimpleName() + "(" + raftPeerId + ")";
this.server =
RaftServer.newBuilder().setServerId(raftPeerId)
.setProperties(serverProperties)
@@ -474,7 +474,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) {
// NOTE : the default value for the retry count in ratis is -1,
// which means retry indefinitely.
- final int syncTimeoutRetryDefault = (int) ratisServerConfig.getFollowerSlownessTimeout() /
+ int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs /
dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS);
int numSyncRetries = conf.getInt(
OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
@@ -558,7 +558,7 @@ private static Parameters createTlsParameters(SecurityConfig conf,
@Override
public void start() throws IOException {
if (!isStarted) {
- LOG.info("Starting {}", name);
+ LOG.info("Starting {} {}", getClass().getSimpleName(), server.getId());
for (ThreadPoolExecutor executor : chunkExecutors) {
executor.prestartAllCoreThreads();
}
@@ -581,11 +581,11 @@ public void start() throws IOException {
}
}
- private int getRealPort(InetSocketAddress address, Port.Name portName) {
+ private int getRealPort(InetSocketAddress address, Port.Name name) {
int realPort = address.getPort();
- final Port port = DatanodeDetails.newPort(portName, realPort);
- datanodeDetails.setPort(port);
- LOG.info("{} is started using port {}", name, port);
+ datanodeDetails.setPort(DatanodeDetails.newPort(name, realPort));
+ LOG.info("{} {} is started using port {} for {}",
+ getClass().getSimpleName(), server.getId(), realPort, name);
return realPort;
}
@@ -593,7 +593,7 @@ private int getRealPort(InetSocketAddress address, Port.Name portName) {
public void stop() {
if (isStarted) {
try {
- LOG.info("Closing {}", name);
+ LOG.info("Stopping {} {}", getClass().getSimpleName(), server.getId());
// shutdown server before the executors as while shutting down,
// some of the tasks would be executed using the executors.
server.close();
@@ -602,7 +602,7 @@ public void stop() {
}
isStarted = false;
} catch (IOException e) {
- LOG.error("Failed to close {}.", name, e);
+ LOG.error("XceiverServerRatis Could not be stopped gracefully.", e);
}
}
}
@@ -706,40 +706,45 @@ private GroupInfoRequest createGroupInfoRequest(
nextCallId());
}
- private void handlePipelineFailure(RaftGroupId groupId, RoleInfoProto roleInfoProto, String reason) {
- final RaftPeerId raftPeerId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId());
- Preconditions.assertEquals(getServer().getId(), raftPeerId, "raftPeerId");
- final StringBuilder b = new StringBuilder()
- .append(name).append(" with datanodeId ").append(RatisHelper.toDatanodeId(raftPeerId))
- .append("handlePipelineFailure ").append(" for ").append(reason)
- .append(": ").append(roleInfoProto.getRole())
- .append(" elapsed time=").append(roleInfoProto.getRoleElapsedTimeMs()).append("ms");
-
+ private void handlePipelineFailure(RaftGroupId groupId,
+ RoleInfoProto roleInfoProto) {
+ String msg;
+ UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf());
+ RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId());
switch (roleInfoProto.getRole()) {
case CANDIDATE:
- final long lastLeaderElapsedTime = roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs();
- b.append(", lastLeaderElapsedTime=").append(lastLeaderElapsedTime).append("ms");
+ msg = datanode + " is in candidate state for " +
+ roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms";
break;
case FOLLOWER:
- b.append(", outstandingOp=").append(roleInfoProto.getFollowerInfo().getOutstandingOp());
+ msg = datanode + " closes pipeline when installSnapshot from leader " +
+ "because leader snapshot doesn't contain any data to replay, " +
+ "all the log entries prior to the snapshot might have been purged." +
+ "So follower should not try to install snapshot from leader but" +
+ "can close the pipeline here. It's in follower state for " +
+ roleInfoProto.getRoleElapsedTimeMs() + "ms";
break;
case LEADER:
- final long followerSlownessTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout();
- for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo().getFollowerInfoList()) {
- final long lastRpcElapsedTimeMs = follower.getLastRpcElapsedTimeMs();
- final boolean slow = lastRpcElapsedTimeMs > followerSlownessTimeoutMs;
- final RaftPeerId followerId = RaftPeerId.valueOf(follower.getId().getId());
- b.append("\n Follower ").append(followerId)
- .append(" with datanodeId ").append(RatisHelper.toDatanodeId(followerId))
- .append(" is ").append(slow ? "slow" : " responding")
- .append(" with lastRpcElapsedTime=").append(lastRpcElapsedTimeMs).append("ms");
+ StringBuilder sb = new StringBuilder();
+ sb.append(datanode).append(" has not seen follower/s");
+ for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo()
+ .getFollowerInfoList()) {
+ if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) {
+ sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId()))
+ .append(" for ").append(follower.getLastRpcElapsedTimeMs())
+ .append("ms");
+ }
}
+ msg = sb.toString();
break;
default:
- throw new IllegalStateException("Unexpected role " + roleInfoProto.getRole());
+ LOG.error("unknown state: {}", roleInfoProto.getRole());
+ throw new IllegalStateException("node" + id + " is in illegal role "
+ + roleInfoProto.getRole());
}
- triggerPipelineClose(groupId, b.toString(), ClosePipelineInfo.Reason.PIPELINE_FAILED);
+ triggerPipelineClose(groupId, msg,
+ ClosePipelineInfo.Reason.PIPELINE_FAILED);
}
private void triggerPipelineClose(RaftGroupId groupId, String detail,
@@ -864,12 +869,12 @@ public void removeGroup(HddsProtos.PipelineID pipelineId)
processReply(reply);
}
- void handleFollowerSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto, RaftPeer follower) {
- handlePipelineFailure(groupId, roleInfoProto, "slow follower " + follower.getId());
+ void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) {
+ handlePipelineFailure(groupId, roleInfoProto);
}
void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) {
- handlePipelineFailure(groupId, roleInfoProto, "no leader");
+ handlePipelineFailure(groupId, roleInfoProto);
}
void handleApplyTransactionFailure(RaftGroupId groupId,
@@ -896,9 +901,10 @@ void handleApplyTransactionFailure(RaftGroupId groupId,
void handleInstallSnapshotFromLeader(RaftGroupId groupId,
RoleInfoProto roleInfoProto,
TermIndex firstTermIndexInLog) {
- LOG.warn("handleInstallSnapshotFromLeader for firstTermIndexInLog={}, terminating pipeline: {}",
+ LOG.warn("Install snapshot notification received from Leader with " +
+ "termIndex: {}, terminating pipeline: {}",
firstTermIndexInLog, groupId);
- handlePipelineFailure(groupId, roleInfoProto, "install snapshot notification");
+ handlePipelineFailure(groupId, roleInfoProto);
}
/**
@@ -944,7 +950,7 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId,
LOG.info("Leader change notification received for group: {} with new " +
"leaderId: {}", groupMemberId.getGroupId(), raftPeerId1);
// Save the reported leader to be sent with the report to SCM
- final boolean leaderForGroup = server.getId().equals(raftPeerId1);
+ boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1);
activePipelines.compute(groupMemberId.getGroupId(),
(key, value) -> value == null ? new ActivePipelineContext(leaderForGroup, false) :
new ActivePipelineContext(leaderForGroup, value.isPendingClose()));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index c58aab2e5ba..b22b9148bb1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -199,7 +199,7 @@ public void shutdown() {
/**
* Delete all files under
- * volume/hdds/cluster-id/tmp/deleted-containers.
+ * <volume>/hdds/<cluster-id>/tmp/deleted-containers.
* This is the directory where containers are moved when they are deleted
* from the system, but before being removed from the filesystem. This
* makes the deletion atomic.
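The javadoc above describes the delete path: a container directory is first renamed into the volume's tmp/deleted-containers area and only afterwards physically removed, so a crash mid-delete never leaves a half-deleted container under the live path. A minimal sketch of that move-then-delete pattern using plain java.nio (hypothetical paths, not the HddsVolume/KeyValueContainerUtil implementation):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

/** Illustrative only: move the directory out of the live path first, then delete it. */
final class TmpDirDelete {
  static void deleteAtomically(Path containerDir, Path deletedContainersDir) throws IOException {
    Files.createDirectories(deletedContainersDir);
    Path target = deletedContainersDir.resolve(containerDir.getFileName());
    // The rename is the atomic step: after it the container is gone from the live path.
    Files.move(containerDir, target, StandardCopyOption.ATOMIC_MOVE);
    // Physical removal can happen (or be retried after a restart) under tmp/deleted-containers.
    try (Stream<Path> walk = Files.walk(target)) {
      walk.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
    }
  }
}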
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 3d1be9791ec..af890269255 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -46,18 +46,16 @@
* - fsCapacity: reported total capacity from local fs.
* - minVolumeFreeSpace (mvfs) : determines the free space for closing
containers. This is like adding a few reserved bytes to reserved space.
- Dn's will send close container action to SCM at this limit, and it is
+ Dn's will send close container action to SCM at this limit & it is
configurable.
*
- *
- * {@code
+ *
* |----used----| (avail) |++mvfs++|++++reserved+++++++|
* |<- capacity ->|
* | fsAvail |-------other-----------|
* |<- fsCapacity ->|
- * }
- *
+ *
* What we could directly get from local fs:
* fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
* We could get from config:
@@ -80,13 +78,11 @@
* then we should use DedicatedDiskSpaceUsage for
* `hdds.datanode.du.factory.classname`,
* Then it is much simpler, since we don't care about other usage:
- * {@code
+ *
* |----used----| (avail)/fsAvail |
* |<- capacity/fsCapacity ->|
- * }
*
* We have avail == fsAvail.
- *
*/
public final class VolumeInfo {
@@ -157,14 +153,11 @@ public long getCapacity() {
}
/**
- *
- * {@code
* Calculate available space use method A.
* |----used----| (avail) |++++++++reserved++++++++|
* |<- capacity ->|
+ *
* A) avail = capacity - used
- * }
- *
*/
public long getAvailable() {
return usage.getAvailable();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 733dc7964f1..7e138b05716 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -77,15 +77,11 @@ public long getUsedSpace() {
}
/**
- *
- * {@code
* Calculate available space use method B.
* |----used----| (avail) |++++++++reserved++++++++|
* | fsAvail |-------other-------|
- * ->|~~~~|<-
+ * ->|~~~~|<-
* remainingReserved
- * }
- *
* B) avail = fsAvail - Max(reserved - other, 0);
*/
public SpaceUsageSource getCurrentUsage() {
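The two ASCII diagrams above boil down to two formulas: method A in VolumeInfo computes avail = capacity - used, while method B in VolumeUsage computes avail = fsAvail - max(reserved - other, 0), where other is roughly the space consumed by non-Ozone data (fsUsed - used). A small self-contained sketch with made-up numbers, independent of the actual SpaceUsageSource types:

/** Illustrative numbers only; not the VolumeInfo/VolumeUsage implementation. */
final class AvailCalc {
  // Method A (VolumeInfo): avail = capacity - used
  static long availA(long capacity, long used) {
    return capacity - used;
  }

  // Method B (VolumeUsage): avail = fsAvail - max(reserved - other, 0)
  static long availB(long fsAvail, long reserved, long other) {
    return fsAvail - Math.max(reserved - other, 0);
  }

  public static void main(String[] args) {
    long fsCapacity = 120, reserved = 20, capacity = fsCapacity - reserved; // 100
    long used = 40, fsUsed = 55, other = fsUsed - used;                     // 15
    long fsAvail = fsCapacity - fsUsed;                                     // 65
    System.out.println(availA(capacity, used));            // 60
    System.out.println(availB(fsAvail, reserved, other));  // 65 - max(20 - 15, 0) = 60
  }
}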
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
index 487e6d37b28..9dedd65565f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
@@ -26,10 +26,12 @@
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.HAUtils;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.security.token.Token;
@@ -67,17 +69,21 @@ public ECContainerOperationClient(ConfigurationSource conf,
}
@Nonnull
- private static XceiverClientManager createClientManager(ConfigurationSource conf, CertificateClient certificateClient)
+ private static XceiverClientManager createClientManager(
+ ConfigurationSource conf, CertificateClient certificateClient)
throws IOException {
ClientTrustManager trustManager = null;
if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
- trustManager = certificateClient.createClientTrustManager();
+ CACertificateProvider localCaCerts =
+ () -> HAUtils.buildCAX509List(certificateClient, conf);
+ CACertificateProvider remoteCacerts =
+ () -> HAUtils.buildCAX509List(null, conf);
+ trustManager = new ClientTrustManager(remoteCacerts, localCaCerts);
}
- XceiverClientManager.ScmClientConfig scmClientConfig = new XceiverClientManager.XceiverClientManagerConfigBuilder()
- .setMaxCacheSize(256)
- .setStaleThresholdMs(10 * 1000)
- .build();
- return new XceiverClientManager(conf, scmClientConfig, trustManager);
+ return new XceiverClientManager(conf,
+ new XceiverClientManager.XceiverClientManagerConfigBuilder()
+ .setMaxCacheSize(256).setStaleThresholdMs(10 * 1000).build(),
+ trustManager);
}
public BlockData[] listBlock(long containerId, DatanodeDetails dn,
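The restored createClientManager wires two lazily evaluated CA-list sources into the trust manager: one built from this datanode's own CertificateClient and one fetched without a client via HAUtils. The sketch below shows the general shape of such a two-provider arrangement with hypothetical types; it is not the Ozone CACertificateProvider/ClientTrustManager API, and the fallback order shown is only one plausible policy:

import java.io.IOException;
import java.security.cert.X509Certificate;
import java.util.List;

/** Hypothetical stand-ins, not the Ozone CACertificateProvider/ClientTrustManager API. */
interface CertListProvider {
  List<X509Certificate> provideCACerts() throws IOException;
}

final class LazyTrustSource {
  private final CertListProvider remote;
  private final CertListProvider local;

  LazyTrustSource(CertListProvider remote, CertListProvider local) {
    this.remote = remote;
    this.local = local;
  }

  /** Consult the cheap local list first; only reach out remotely if it is unavailable. */
  List<X509Certificate> caCerts() throws IOException {
    try {
      return local.provideCACerts();
    } catch (IOException e) {
      return remote.provideCACerts();
    }
  }
}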
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
index f1e1d0d900b..7e64766b41c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory;
import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl;
import org.apache.hadoop.ozone.client.io.ECBlockInputStreamProxy;
@@ -370,7 +371,7 @@ private void logBlockGroupDetails(BlockLocationInfo blockLocationInfo,
.append(" block length: ")
.append(data.getSize())
.append(" block group length: ")
- .append(data.getBlockGroupLength())
+ .append(getBlockDataLength(data))
.append(" chunk list: \n");
int cnt = 0;
for (ContainerProtos.ChunkInfo chunkInfo : data.getChunks()) {
@@ -572,7 +573,7 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup,
continue;
}
- long putBlockLen = blockGroup[i].getBlockGroupLength();
+ long putBlockLen = getBlockDataLength(blockGroup[i]);
// Use safe length is the minimum of the lengths recorded across the
// stripe
blockGroupLen = Math.min(putBlockLen, blockGroupLen);
@@ -580,6 +581,16 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup,
return blockGroupLen == Long.MAX_VALUE ? 0 : blockGroupLen;
}
+ private long getBlockDataLength(BlockData blockData) {
+ String lenStr = blockData.getMetadata()
+ .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK);
+ // If we don't have the length, then it indicates a problem with the stripe.
+ // All replica should carry the length, so if it is not there, we return 0,
+ // which will cause us to set the length of the block to zero and not
+ // attempt to reconstruct it.
+ return (lenStr == null) ? 0 : Long.parseLong(lenStr);
+ }
+
public ECReconstructionMetrics getECReconstructionMetrics() {
return this.metrics;
}
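The restored getBlockDataLength/calcEffectiveBlockGroupLen pair reads the block-group length from each replica's metadata, treats a missing entry as 0, and takes the minimum across the stripe as the safe length (0 if nothing usable is found). A compact illustration of that reduction, with a stand-in metadata key rather than the real OzoneConsts constant:

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/** Illustrative only; BlockData/OzoneConsts are not used here. */
final class SafeLength {
  static final String BLOCK_GROUP_LEN_KEY = "blockGroupLen"; // stand-in for the real constant

  static long lengthOf(Map<String, String> replicaMetadata) {
    String len = replicaMetadata.get(BLOCK_GROUP_LEN_KEY);
    return len == null ? 0 : Long.parseLong(len); // missing metadata -> treat as length 0
  }

  /** Safe length = minimum recorded length across the replicas present in the stripe. */
  static long safeLength(List<Map<String, String>> stripe) {
    long min = Long.MAX_VALUE;
    for (Map<String, String> replica : stripe) {
      if (replica == null) {
        continue; // absent replica index is skipped, as in the coordinator
      }
      min = Math.min(min, lengthOf(replica));
    }
    return min == Long.MAX_VALUE ? 0 : min;
  }

  public static void main(String[] args) {
    System.out.println(safeLength(Arrays.asList(
        Map.of(BLOCK_GROUP_LEN_KEY, "1048576"),
        Map.of())));   // second replica carries no length, so the safe length is 0
  }
}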
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
index a50a125f6d4..6d32f3a3f3e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
@@ -46,16 +46,6 @@ public ECReconstructionCoordinatorTask(
debugString = reconstructionCommandInfo.toString();
}
- @Override
- public String getMetricName() {
- return "ECReconstructions";
- }
-
- @Override
- public String getMetricDescriptionSegment() {
- return "EC reconstructions";
- }
-
@Override
public void runTask() {
// Implement the coordinator logic to handle a container group
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index b4ff62e52d2..cea6737c7c9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -935,6 +935,7 @@ private ContainerReplicaProto.State getHddsState()
/**
* Returns container DB file.
+ * @return
*/
public File getContainerDBFile() {
return KeyValueContainerLocationUtil.getContainerDBFile(containerData);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 708038bd13f..ccc24dad0f9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -431,6 +431,7 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() {
/**
* Schema v3 use a prefix as startKey,
* for other schemas just return null.
+ * @return
*/
public String startKeyEmpty() {
if (hasSchema(SCHEMA_V3)) {
@@ -442,6 +443,7 @@ public String startKeyEmpty() {
/**
* Schema v3 use containerID as key prefix,
* for other schemas just return null.
+ * @return
*/
public String containerPrefix() {
if (hasSchema(SCHEMA_V3)) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d587748e6f8..d1028727648 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -137,7 +137,6 @@
import org.apache.hadoop.ozone.container.common.interfaces.ScanResult;
import static org.apache.hadoop.ozone.ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST;
-import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST;
import org.apache.hadoop.util.Time;
import org.apache.ratis.statemachine.StateMachine;
@@ -596,13 +595,9 @@ ContainerCommandResponseProto handlePutBlock(
boolean endOfBlock = false;
if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) {
- // There are two cases where client sends empty put block with eof.
- // (1) An EC empty file. In this case, the block/chunk file does not exist,
- // so no need to flush/close the file.
- // (2) Ratis output stream in incremental chunk list mode may send empty put block
- // to close the block, in which case we need to flush/close the file.
- if (!request.getPutBlock().getBlockData().getChunksList().isEmpty() ||
- blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) {
+ // in EC, we will be doing empty put block.
+ // So, let's flush only when there are any chunks
+ if (!request.getPutBlock().getBlockData().getChunksList().isEmpty()) {
chunkManager.finishWriteChunks(kvContainer, blockData);
}
endOfBlock = true;
@@ -997,9 +992,6 @@ ContainerCommandResponseProto handleWriteChunk(
// of order.
blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
boolean eob = writeChunk.getBlock().getEof();
- if (eob) {
- chunkManager.finishWriteChunks(kvContainer, blockData);
- }
blockManager.putBlock(kvContainer, blockData, eob);
blockDataProto = blockData.getProtoBufMessage();
final long numBytes = blockDataProto.getSerializedSize();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 945efbcf6ea..7773b54f794 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -99,6 +99,7 @@ public static DatanodeStore getUncachedDatanodeStore(
* opened by this thread, the other thread will get a RocksDB exception.
* @param containerData The container data
* @param conf Configuration
+ * @return
* @throws IOException
*/
public static DatanodeStore getUncachedDatanodeStore(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index dc048ac16aa..0fac45571c7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -39,7 +39,6 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.function.ToLongFunction;
@@ -51,7 +50,6 @@
import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
@@ -202,12 +200,11 @@ private static long writeDataToChannel(FileChannel channel, ChunkBuffer data,
}
}
- @SuppressWarnings("checkstyle:parameternumber")
public static ChunkBuffer readData(long len, int bufferCapacity,
- File file, long off, HddsVolume volume, int readMappedBufferThreshold, boolean mmapEnabled,
- MappedBufferManager mappedBufferManager) throws StorageContainerException {
- if (mmapEnabled && len > readMappedBufferThreshold && bufferCapacity > readMappedBufferThreshold) {
- return readData(file, bufferCapacity, off, len, volume, mappedBufferManager);
+ File file, long off, HddsVolume volume, int readMappedBufferThreshold)
+ throws StorageContainerException {
+ if (len > readMappedBufferThreshold) {
+ return readData(file, bufferCapacity, off, len, volume);
} else if (len == 0) {
return ChunkBuffer.wrap(Collections.emptyList());
}
@@ -259,52 +256,25 @@ private static void readData(File file, long offset, long len,
* @return a list of {@link MappedByteBuffer} containing the data.
*/
private static ChunkBuffer readData(File file, int chunkSize,
- long offset, long length, HddsVolume volume, MappedBufferManager mappedBufferManager)
+ long offset, long length, HddsVolume volume)
throws StorageContainerException {
- final int bufferNum = Math.toIntExact((length - 1) / chunkSize) + 1;
- if (!mappedBufferManager.getQuota(bufferNum)) {
- // proceed with normal buffer
- final ByteBuffer[] buffers = BufferUtils.assignByteBuffers(length,
- chunkSize);
- readData(file, offset, length, c -> c.position(offset).read(buffers), volume);
- Arrays.stream(buffers).forEach(ByteBuffer::flip);
- return ChunkBuffer.wrap(Arrays.asList(buffers));
- } else {
- try {
- // proceed with mapped buffer
- final List<ByteBuffer> buffers = new ArrayList<>(bufferNum);
- readData(file, offset, length, channel -> {
- long readLen = 0;
- while (readLen < length) {
- final int n = Math.toIntExact(Math.min(length - readLen, chunkSize));
- final long finalOffset = offset + readLen;
- final AtomicReference<IOException> exception = new AtomicReference<>();
- ByteBuffer mapped = mappedBufferManager.computeIfAbsent(file.getAbsolutePath(), finalOffset, n,
- () -> {
- try {
- return channel.map(FileChannel.MapMode.READ_ONLY, finalOffset, n);
- } catch (IOException e) {
- LOG.error("Failed to map file {} with offset {} and length {}", file, finalOffset, n);
- exception.set(e);
- return null;
- }
- });
- if (mapped == null) {
- throw exception.get();
- }
- LOG.debug("mapped: offset={}, readLen={}, n={}, {}", finalOffset, readLen, n, mapped.getClass());
- readLen += mapped.remaining();
- buffers.add(mapped);
- }
- return readLen;
- }, volume);
- return ChunkBuffer.wrap(buffers);
- } catch (Throwable e) {
- mappedBufferManager.releaseQuota(bufferNum);
- throw e;
+ final List<ByteBuffer> buffers = new ArrayList<>(
+ Math.toIntExact((length - 1) / chunkSize) + 1);
+ readData(file, offset, length, channel -> {
+ long readLen = 0;
+ while (readLen < length) {
+ final int n = Math.toIntExact(Math.min(length - readLen, chunkSize));
+ final ByteBuffer mapped = channel.map(
+ FileChannel.MapMode.READ_ONLY, offset + readLen, n);
+ LOG.debug("mapped: offset={}, readLen={}, n={}, {}",
+ offset, readLen, n, mapped.getClass());
+ readLen += mapped.remaining();
+ buffers.add(mapped);
}
- }
+ return readLen;
+ }, volume);
+ return ChunkBuffer.wrap(buffers);
}
/**
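The restored readData keeps one simple rule: requests larger than readMappedBufferThreshold go through FileChannel.map (in chunkSize slices), everything else is read into regular heap buffers. A simplified, self-contained sketch of that decision that maps the whole region in one go (hypothetical names, no ChunkBuffer or HddsVolume involved):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Illustrative only; the real code wraps results in ChunkBuffer and slices mapped reads by chunk size. */
final class ThresholdRead {
  static ByteBuffer read(Path file, long offset, int length, int mappedThreshold) throws IOException {
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      if (length > mappedThreshold) {
        // Large read: map the region read-only instead of copying it onto the heap.
        return channel.map(FileChannel.MapMode.READ_ONLY, offset, length);
      }
      // Small read: plain heap buffer filled from the requested offset.
      ByteBuffer buf = ByteBuffer.allocate(length);
      channel.position(offset);
      while (buf.hasRemaining() && channel.read(buf) > 0) {
        // loop until the requested range is filled or EOF
      }
      buf.flip();
      return buf;
    }
  }
}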
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index dd719a81fb3..b287d9ac133 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) {
/**
* Moves container directory to a new location
- * under "volume/hdds/cluster-id/tmp/deleted-containers"
+ * under "/hdds//tmp/deleted-containers"
* and updates metadata and chunks path.
* Containers will be moved under it before getting deleted
* to avoid, in case of failure, having artifact leftovers
* on the default container path on the disk.
*
- * Delete operation for Schema &lt; V3
+ * Delete operation for Schema < V3
* 1. Container is marked DELETED
* 2. Container is removed from memory container set
* 3. Container DB handler from cache is removed and closed
@@ -460,6 +460,7 @@ public static boolean isSameSchemaVersion(String schema, String other) {
* 5. Container is deleted from tmp directory.
*
* @param keyValueContainerData
+ * @return true if renaming was successful
*/
public static void moveToDeletedContainerDir(
KeyValueContainerData keyValueContainerData,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 6232b843567..7b3852011d3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -64,7 +64,6 @@ public class BlockManagerImpl implements BlockManager {
// Default Read Buffer capacity when Checksum is not present
private final int defaultReadBufferCapacity;
private final int readMappedBufferThreshold;
- private final int readMappedBufferMaxCount;
/**
* Constructs a Block Manager.
@@ -80,9 +79,6 @@ public BlockManagerImpl(ConfigurationSource conf) {
this.readMappedBufferThreshold = config.getBufferSize(
ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY,
ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT);
- this.readMappedBufferMaxCount = config.getInt(
- ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY,
- ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT);
}
@Override
@@ -308,11 +304,6 @@ public int getReadMappedBufferThreshold() {
return readMappedBufferThreshold;
}
- /** @return the max count of memory mapped buffers for read. */
- public int getReadMappedBufferMaxCount() {
- return readMappedBufferMaxCount;
- }
-
/**
* Deletes an existing block.
* As Deletion is handled by BlockDeletingService,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index aa5d52f3cee..288a2d3e331 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -46,6 +46,7 @@ private ChunkManagerFactory() {
* @param conf Configuration
* @param manager This parameter will be used only for read data of
* FILE_PER_CHUNK layout file. Can be null for other cases.
+ * @return
*/
public static ChunkManager createChunkManager(ConfigurationSource conf,
BlockManager manager, VolumeSet volSet) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 4ca578d7717..a87b184ccec 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -75,8 +75,6 @@ public class FilePerBlockStrategy implements ChunkManager {
private final OpenFiles files = new OpenFiles();
private final int defaultReadBufferCapacity;
private final int readMappedBufferThreshold;
- private final int readMappedBufferMaxCount;
- private final MappedBufferManager mappedBufferManager;
private final VolumeSet volumeSet;
public FilePerBlockStrategy(boolean sync, BlockManager manager,
@@ -86,15 +84,7 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager,
manager.getDefaultReadBufferCapacity();
this.readMappedBufferThreshold = manager == null ? 0
: manager.getReadMappedBufferThreshold();
- this.readMappedBufferMaxCount = manager == null ? 0
- : manager.getReadMappedBufferMaxCount();
- LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount);
this.volumeSet = volSet;
- if (this.readMappedBufferMaxCount > 0) {
- mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount);
- } else {
- mappedBufferManager = null;
- }
}
private static void checkLayoutVersion(Container container) {
@@ -202,10 +192,10 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
final long len = info.getLen();
long offset = info.getOffset();
- int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
+ int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
defaultReadBufferCapacity);
return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume,
- readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager);
+ readMappedBufferThreshold);
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 6ac88cad7f5..a649f573bf0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -67,8 +67,6 @@ public class FilePerChunkStrategy implements ChunkManager {
private final BlockManager blockManager;
private final int defaultReadBufferCapacity;
private final int readMappedBufferThreshold;
- private final int readMappedBufferMaxCount;
- private final MappedBufferManager mappedBufferManager;
private final VolumeSet volumeSet;
public FilePerChunkStrategy(boolean sync, BlockManager manager,
@@ -79,15 +77,7 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager,
manager.getDefaultReadBufferCapacity();
this.readMappedBufferThreshold = manager == null ? 0
: manager.getReadMappedBufferThreshold();
- this.readMappedBufferMaxCount = manager == null ? 0
- : manager.getReadMappedBufferMaxCount();
- LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount);
this.volumeSet = volSet;
- if (this.readMappedBufferMaxCount > 0) {
- mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount);
- } else {
- mappedBufferManager = null;
- }
}
private static void checkLayoutVersion(Container container) {
@@ -275,7 +265,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
long offset = info.getOffset() - chunkFileOffset;
Preconditions.checkState(offset >= 0);
return ChunkUtils.readData(len, bufferCapacity, file, offset, volume,
- readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager);
+ readMappedBufferThreshold);
}
} catch (StorageContainerException ex) {
//UNABLE TO FIND chunk is not a problem as we will try with the
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java
deleted file mode 100644
index be2751925c7..00000000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.util.concurrent.Striped;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.ref.WeakReference;
-import java.nio.ByteBuffer;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.function.Supplier;
-
-/**
- * A manager that keeps the mapped buffers under a predefined total count and also supports reusing mapped buffers.
- */
-public class MappedBufferManager {
-
- private static ConcurrentHashMap<String, WeakReference<ByteBuffer>> mappedBuffers =
- new ConcurrentHashMap<String, WeakReference<ByteBuffer>>();
- private static final Logger LOG = LoggerFactory.getLogger(MappedBufferManager.class);
- private final Semaphore semaphore;
- private final int capacity;
- private final AtomicBoolean cleanupInProgress = new AtomicBoolean(false);
- private final Striped<Lock> lock;
-
- public MappedBufferManager(int capacity) {
- this.capacity = capacity;
- this.semaphore = new Semaphore(capacity);
- this.lock = Striped.lazyWeakLock(1024);
- }
-
- public boolean getQuota(int permits) {
- boolean ret = semaphore.tryAcquire(permits);
- if (ret) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("quota is decreased by {} to total {}", permits, semaphore.availablePermits());
- }
- } else {
- if (cleanupInProgress.compareAndSet(false, true)) {
- CompletableFuture.runAsync(() -> {
- int p = 0;
- try {
- for (String key : mappedBuffers.keySet()) {
- ByteBuffer buf = mappedBuffers.get(key).get();
- if (buf == null) {
- mappedBuffers.remove(key);
- p++;
- }
- }
- if (p > 0) {
- releaseQuota(p);
- }
- } finally {
- cleanupInProgress.set(false);
- }
- });
- }
- }
- return ret;
- }
-
- public void releaseQuota(int permits) {
- semaphore.release(permits);
- if (LOG.isDebugEnabled()) {
- LOG.debug("quota is increased by {} to total {}", permits, semaphore.availablePermits());
- }
- }
-
- public int availableQuota() {
- return semaphore.availablePermits();
- }
-
- public ByteBuffer computeIfAbsent(String file, long position, long size,
- Supplier<ByteBuffer> supplier) {
- String key = file + "-" + position + "-" + size;
- Lock fileLock = lock.get(key);
- fileLock.lock();
- try {
- WeakReference<ByteBuffer> refer = mappedBuffers.get(key);
- if (refer != null && refer.get() != null) {
- // reuse the mapped buffer
- if (LOG.isDebugEnabled()) {
- LOG.debug("find buffer for key {}", key);
- }
- releaseQuota(1);
- return refer.get();
- }
-
- ByteBuffer buffer = supplier.get();
- if (buffer != null) {
- mappedBuffers.put(key, new WeakReference<>(buffer));
- if (LOG.isDebugEnabled()) {
- LOG.debug("add buffer for key {}", key);
- }
- }
- return buffer;
- } finally {
- fileLock.unlock();
- }
- }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
index 601e7b2712c..8df856d4b93 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
@@ -99,9 +99,7 @@ public void setLinked() {
linked.set(true);
}
- /**
- * @return true if {@link org.apache.ratis.statemachine.StateMachine.DataChannel} is already linked.
- */
+ /** @return true iff {@link StateMachine.DataChannel} is already linked. */
public boolean cleanUp() {
if (linked.get()) {
// already linked, nothing to do.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
index 256d357a31d..6dd8590bdf3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
@@ -99,9 +99,6 @@ void finalizeBlock(Container container, BlockID blockId)
/** @return the threshold to read using memory mapped buffers. */
int getReadMappedBufferThreshold();
- /** @return the max count of memory mapped buffers to read. */
- int getReadMappedBufferMaxCount();
-
/**
* Shutdown ContainerManager.
*/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 88aeb3c174d..26719d7f035 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -300,9 +300,9 @@ protected static void checkTableStatus(Table<?, ?> table, String name)
/**
* Block Iterator for KeyValue Container. This block iterator returns blocks
- * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no
+ * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no
* filter is specified, then default filter used is
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()}
+ * {@link MetadataKeyFilters#getUnprefixedKeyFilter()}
*/
@InterfaceAudience.Public
public static class KeyValueBlockIterator implements
@@ -405,9 +405,9 @@ public void close() throws IOException {
/**
* Block localId Iterator for KeyValue Container.
* This Block localId iterator returns localIds
- * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no
+ * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no
* filter is specified, then default filter used is
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()}
+ * {@link MetadataKeyFilters#getUnprefixedKeyFilter()}
*/
@InterfaceAudience.Public
public static class KeyValueBlockLocalIdIterator implements
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java
index 1be5a3819c8..4beb2075432 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java
@@ -25,8 +25,7 @@
import java.io.IOException;
/**
- * Codec for parsing {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfoList}
- * objects from data
+ * Codec for parsing {@link ContainerProtos.ChunkInfoList} objects from data
* that may have been written using schema version one. Before upgrading
* schema versions, deleted block IDs were stored with a duplicate copy of
* their ID as the value in the database. After upgrading the code, any
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 84ddba759fe..a49cb7278a7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -167,6 +167,7 @@ public void closeContainer(final long containerId) throws IOException {
* Returns the Container given a container id.
*
* @param containerId ID of the container
+ * @return Container
*/
public void addFinalizedBlock(final long containerId,
final long localId) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 8ae838a7e53..cb7db07c24f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -483,10 +483,10 @@ public void start(String clusterId) throws IOException {
replicationServer.start();
datanodeDetails.setPort(Name.REPLICATION, replicationServer.getPort());
- hddsDispatcher.init();
- hddsDispatcher.setClusterId(clusterId);
writeChannel.start();
readChannel.start();
+ hddsDispatcher.init();
+ hddsDispatcher.setClusterId(clusterId);
blockDeletingService.start();
recoveringContainerScrubbingService.start();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
index f4bf54a3d82..72fa88b35d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
@@ -70,10 +70,6 @@ protected AbstractReplicationTask(long containerID,
this.term = term;
queued = Instant.now(clock);
}
-
- protected abstract String getMetricName();
-
- protected abstract String getMetricDescriptionSegment();
public long getContainerId() {
return containerId;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
index 92ff4b6d8d6..5ceea125e81 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
@@ -26,7 +26,6 @@
import java.util.Objects;
import java.util.OptionalLong;
import java.util.Set;
-import java.util.Collections;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.PriorityBlockingQueue;
@@ -72,17 +71,11 @@ public final class ReplicationSupervisor {
private final StateContext context;
private final Clock clock;
- private final Map<String, AtomicLong> requestCounter = new ConcurrentHashMap<>();
- private final Map<String, AtomicLong> successCounter = new ConcurrentHashMap<>();
- private final Map<String, AtomicLong> failureCounter = new ConcurrentHashMap<>();
- private final Map<String, AtomicLong> timeoutCounter = new ConcurrentHashMap<>();
- private final Map<String, AtomicLong> skippedCounter = new ConcurrentHashMap<>();
-
- private static final Map<String, String> METRICS_MAP;
-
- static {
- METRICS_MAP = new HashMap<>();
- }
+ private final AtomicLong requestCounter = new AtomicLong();
+ private final AtomicLong successCounter = new AtomicLong();
+ private final AtomicLong failureCounter = new AtomicLong();
+ private final AtomicLong timeoutCounter = new AtomicLong();
+ private final AtomicLong skippedCounter = new AtomicLong();
/**
* A set of container IDs that are currently being downloaded
@@ -195,10 +188,6 @@ public static Builder newBuilder() {
return new Builder();
}
- public static Map<String, String> getMetricsMap() {
- return Collections.unmodifiableMap(METRICS_MAP);
- }
-
private ReplicationSupervisor(StateContext context, ExecutorService executor,
ReplicationConfig replicationConfig, DatanodeConfiguration datanodeConfig,
Clock clock, IntConsumer executorThreadUpdater) {
@@ -232,19 +221,6 @@ public void addTask(AbstractReplicationTask task) {
return;
}
- if (requestCounter.get(task.getMetricName()) == null) {
- synchronized (this) {
- if (requestCounter.get(task.getMetricName()) == null) {
- requestCounter.put(task.getMetricName(), new AtomicLong(0));
- successCounter.put(task.getMetricName(), new AtomicLong(0));
- failureCounter.put(task.getMetricName(), new AtomicLong(0));
- timeoutCounter.put(task.getMetricName(), new AtomicLong(0));
- skippedCounter.put(task.getMetricName(), new AtomicLong(0));
- METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment());
- }
- }
- }
-
if (inFlight.add(task)) {
if (task.getPriority() != ReplicationCommandPriority.LOW) {
// Low priority tasks are not included in the replication queue sizes
@@ -354,14 +330,14 @@ public TaskRunner(AbstractReplicationTask task) {
@Override
public void run() {
try {
- requestCounter.get(task.getMetricName()).incrementAndGet();
+ requestCounter.incrementAndGet();
final long now = clock.millis();
final long deadline = task.getDeadline();
if (deadline > 0 && now > deadline) {
LOG.info("Ignoring {} since the deadline has passed ({} < {})",
this, Instant.ofEpochMilli(deadline), Instant.ofEpochMilli(now));
- timeoutCounter.get(task.getMetricName()).incrementAndGet();
+ timeoutCounter.incrementAndGet();
return;
}
@@ -388,18 +364,18 @@ public void run() {
task.runTask();
if (task.getStatus() == Status.FAILED) {
LOG.warn("Failed {}", this);
- failureCounter.get(task.getMetricName()).incrementAndGet();
+ failureCounter.incrementAndGet();
} else if (task.getStatus() == Status.DONE) {
LOG.info("Successful {}", this);
- successCounter.get(task.getMetricName()).incrementAndGet();
+ successCounter.incrementAndGet();
} else if (task.getStatus() == Status.SKIPPED) {
LOG.info("Skipped {}", this);
- skippedCounter.get(task.getMetricName()).incrementAndGet();
+ skippedCounter.incrementAndGet();
}
} catch (Exception e) {
task.setStatus(Status.FAILED);
LOG.warn("Failed {}", this, e);
- failureCounter.get(task.getMetricName()).incrementAndGet();
+ failureCounter.incrementAndGet();
} finally {
inFlight.remove(task);
decrementTaskCounter(task);
@@ -443,12 +419,7 @@ public boolean equals(Object o) {
}
public long getReplicationRequestCount() {
- return getCount(requestCounter);
- }
-
- public long getReplicationRequestCount(String metricsName) {
- AtomicLong counter = requestCounter.get(metricsName);
- return counter != null ? counter.get() : 0;
+ return requestCounter.get();
}
public long getQueueSize() {
@@ -467,48 +438,20 @@ public long getMaxReplicationStreams() {
}
}
- private long getCount(Map<String, AtomicLong> counter) {
- long total = 0;
- for (Map.Entry<String, AtomicLong> entry : counter.entrySet()) {
- total += entry.getValue().get();
- }
- return total;
- }
-
public long getReplicationSuccessCount() {
- return getCount(successCounter);
- }
-
- public long getReplicationSuccessCount(String metricsName) {
- AtomicLong counter = successCounter.get(metricsName);
- return counter != null ? counter.get() : 0;
+ return successCounter.get();
}
public long getReplicationFailureCount() {
- return getCount(failureCounter);
- }
-
- public long getReplicationFailureCount(String metricsName) {
- AtomicLong counter = failureCounter.get(metricsName);
- return counter != null ? counter.get() : 0;
+ return failureCounter.get();
}
public long getReplicationTimeoutCount() {
- return getCount(timeoutCounter);
- }
-
- public long getReplicationTimeoutCount(String metricsName) {
- AtomicLong counter = timeoutCounter.get(metricsName);
- return counter != null ? counter.get() : 0;
+ return timeoutCounter.get();
}
public long getReplicationSkippedCount() {
- return getCount(skippedCounter);
- }
-
- public long getReplicationSkippedCount(String metricsName) {
- AtomicLong counter = skippedCounter.get(metricsName);
- return counter != null ? counter.get() : 0;
+ return skippedCounter.get();
}
}
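The block removed above lazily registers one AtomicLong per task metric name behind a synchronized double-check, while the restored code keeps a single counter for all task types. Where per-type counters are wanted, ConcurrentHashMap.computeIfAbsent achieves the same initialization without the synchronized block; a sketch using the ContainerReplications metric name from the diff, but otherwise not the ReplicationSupervisor implementation:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/** Illustrative alternative, not the ReplicationSupervisor implementation. */
final class PerTypeCounters {
  private final Map<String, AtomicLong> requests = new ConcurrentHashMap<>();

  void recordRequest(String metricName) {
    requests.computeIfAbsent(metricName, k -> new AtomicLong()).incrementAndGet();
  }

  long requestCount(String metricName) {
    AtomicLong counter = requests.get(metricName);
    return counter == null ? 0 : counter.get();
  }

  public static void main(String[] args) {
    PerTypeCounters counters = new PerTypeCounters();
    counters.recordRequest("ContainerReplications");
    counters.recordRequest("ContainerReplications");
    System.out.println(counters.requestCount("ContainerReplications")); // 2
  }
}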
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java
index a1763976af9..671e985d7ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java
@@ -71,47 +71,16 @@ public void getMetrics(MetricsCollector collector, boolean all) {
.addGauge(Interns.info("numRequestedReplications",
"Number of requested replications"),
supervisor.getReplicationRequestCount())
- .addGauge(Interns.info("numSuccessReplications",
- "Number of successful replications"),
- supervisor.getReplicationSuccessCount())
- .addGauge(Interns.info("numFailureReplications",
- "Number of failure replications"),
- supervisor.getReplicationFailureCount())
.addGauge(Interns.info("numTimeoutReplications",
"Number of replication requests timed out before being processed"),
supervisor.getReplicationTimeoutCount())
.addGauge(Interns.info("numSkippedReplications",
"Number of replication requests skipped as the container is "
- + "already present"),
- supervisor.getReplicationSkippedCount())
+ + "already present"), supervisor.getReplicationSkippedCount())
.addGauge(Interns.info("maxReplicationStreams", "Maximum number of "
+ "concurrent replication tasks which can run simultaneously"),
supervisor.getMaxReplicationStreams());
- Map<String, String> metricsMap = ReplicationSupervisor.getMetricsMap();
- if (!metricsMap.isEmpty()) {
- metricsMap.forEach((metricsName, descriptionSegment) -> {
- if (!metricsName.equals("")) {
- builder.addGauge(Interns.info("numRequested" + metricsName,
- "Number of requested " + descriptionSegment),
- supervisor.getReplicationRequestCount(metricsName))
- .addGauge(Interns.info("numSuccess" + metricsName,
- "Number of successful " + descriptionSegment),
- supervisor.getReplicationSuccessCount(metricsName))
- .addGauge(Interns.info("numFailure" + metricsName,
- "Number of failure " + descriptionSegment),
- supervisor.getReplicationFailureCount(metricsName))
- .addGauge(Interns.info("numTimeout" + metricsName,
- "Number of " + descriptionSegment + " timed out before being processed"),
- supervisor.getReplicationTimeoutCount(metricsName))
- .addGauge(Interns.info("numSkipped" + metricsName,
- "Number of " + descriptionSegment + " skipped as the container is "
- + "already present"),
- supervisor.getReplicationSkippedCount(metricsName));
- }
- });
- }
-
Map<String, Integer> tasks = supervisor.getInFlightReplicationSummary();
for (Map.Entry<String, Integer> entry : tasks.entrySet()) {
builder.addGauge(Interns.info("numInflight" + entry.getKey(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
index 2168f324c24..ca0ca98906c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
@@ -65,16 +65,6 @@ protected ReplicationTask(
replicator);
}
- @Override
- public String getMetricName() {
- return "ContainerReplications";
- }
-
- @Override
- public String getMetricDescriptionSegment() {
- return "container replications";
- }
-
@Override
public boolean equals(Object o) {
if (this == o) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java
index e49f3c3d6e5..5fdfc931b99 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java
@@ -27,9 +27,9 @@ public interface StreamingSource {
/**
*
- * @param id custom identifier
+ * @param id: custom identifier
*
- * @return map of files which should be copied (logical name -> real path)
+ * @return map of files which should be copied (logical name -> real path)
*/
Map<String, Path> getFilesToStream(String id) throws InterruptedException;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
index ada80c980f6..f6633cb9d37 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java
@@ -17,13 +17,13 @@
*/
package org.apache.hadoop.ozone.protocol.commands;
-import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.HddsIdFactory;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -140,7 +140,7 @@ public String toString() {
.collect(Collectors.joining(", "))).append("]")
.append(", targets: ").append(getTargetDatanodes())
.append(", missingIndexes: ").append(
- Arrays.toString(missingContainerIndexes.toByteArray()));
+ StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer()));
return sb.toString();
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index d6b44f2a641..eeb99b5a3db 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -122,6 +122,7 @@ private SCMDatanodeResponse submitRequest(Type type,
/**
* Returns SCM version.
*
+ * @param unused - set to null and unused.
* @return Version info.
*/
@Override
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index a3b60aa36da..219645c8edc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.UUID;
@@ -44,8 +43,6 @@
import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
@@ -295,28 +292,4 @@ private void waitTillFinishExecution(
GenericTestUtils.waitFor(()
-> closeHandler.getQueuedCount() <= 0, 10, 3000);
}
-
- @Test
- public void testThreadPoolPoolSize() {
- assertEquals(1, subject.getThreadPoolMaxPoolSize());
- assertEquals(0, subject.getThreadPoolActivePoolSize());
-
- CloseContainerCommandHandler closeContainerCommandHandler =
- new CloseContainerCommandHandler(10, 10, "");
- closeContainerCommandHandler.handle(new CloseContainerCommand(
- CONTAINER_ID + 1, PipelineID.randomId()),
- ozoneContainer, context, null);
- closeContainerCommandHandler.handle(new CloseContainerCommand(
- CONTAINER_ID + 2, PipelineID.randomId()),
- ozoneContainer, context, null);
- closeContainerCommandHandler.handle(new CloseContainerCommand(
- CONTAINER_ID + 3, PipelineID.randomId()),
- ozoneContainer, context, null);
- closeContainerCommandHandler.handle(new CloseContainerCommand(
- CONTAINER_ID + 4, PipelineID.randomId()),
- ozoneContainer, context, null);
- assertEquals(10, closeContainerCommandHandler.getThreadPoolMaxPoolSize());
- assertTrue(closeContainerCommandHandler.getThreadPoolActivePoolSize() > 0);
- }
-
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
index 5ee31b97fd6..49c34828fbd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
@@ -19,14 +19,6 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -40,6 +32,7 @@
import java.time.ZoneId;
import java.util.OptionalLong;
+import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
@@ -70,14 +63,8 @@ public void setup() {
}
@Test
- public void testExpiredCommandsAreNotProcessed()
- throws IOException, InterruptedException {
- CountDownLatch latch1 = new CountDownLatch(1);
- ThreadFactory threadFactory = new ThreadFactoryBuilder().build();
- ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor(
- threadFactory, latch1);
- DeleteContainerCommandHandler handler = new DeleteContainerCommandHandler(
- clock, executor, 100);
+ public void testExpiredCommandsAreNotProcessed() throws IOException {
+ DeleteContainerCommandHandler handler = createSubject(clock, 1000);
DeleteContainerCommand command1 = new DeleteContainerCommand(1L);
command1.setDeadline(clock.millis() + 10000);
@@ -88,14 +75,9 @@ public void testExpiredCommandsAreNotProcessed()
clock.fastForward(15000);
handler.handle(command1, ozoneContainer, null, null);
- latch1.await();
assertEquals(1, handler.getTimeoutCount());
- CountDownLatch latch2 = new CountDownLatch(2);
- executor.setLatch(latch2);
handler.handle(command2, ozoneContainer, null, null);
handler.handle(command3, ozoneContainer, null, null);
- latch2.await();
-
assertEquals(1, handler.getTimeoutCount());
assertEquals(3, handler.getInvocationCount());
verify(controller, times(0))
@@ -107,8 +89,7 @@ public void testExpiredCommandsAreNotProcessed()
}
@Test
- public void testCommandForCurrentTermIsExecuted()
- throws IOException, InterruptedException {
+ public void testCommandForCurrentTermIsExecuted() throws IOException {
// GIVEN
DeleteContainerCommand command = new DeleteContainerCommand(1L);
command.setTerm(1);
@@ -116,17 +97,10 @@ public void testCommandForCurrentTermIsExecuted()
when(context.getTermOfLeaderSCM())
.thenReturn(OptionalLong.of(command.getTerm()));
- TestClock testClock = new TestClock(Instant.now(), ZoneId.systemDefault());
- CountDownLatch latch = new CountDownLatch(1);
- ThreadFactory threadFactory = new ThreadFactoryBuilder().build();
- ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor(
- threadFactory, latch);
- DeleteContainerCommandHandler subject = new DeleteContainerCommandHandler(
- testClock, executor, 100);
+ DeleteContainerCommandHandler subject = createSubject();
// WHEN
subject.handle(command, ozoneContainer, context, null);
- latch.await();
// THEN
verify(controller, times(1))
@@ -189,10 +163,8 @@ private static DeleteContainerCommandHandler createSubject() {
private static DeleteContainerCommandHandler createSubject(
TestClock clock, int queueSize) {
- ThreadFactory threadFactory = new ThreadFactoryBuilder().build();
- ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.
- newFixedThreadPool(1, threadFactory);
- return new DeleteContainerCommandHandler(clock, executor, queueSize);
+ return new DeleteContainerCommandHandler(clock,
+ newDirectExecutorService(), queueSize);
}
private static DeleteContainerCommandHandler createSubjectWithPoolSize(
@@ -200,21 +172,4 @@ private static DeleteContainerCommandHandler createSubjectWithPoolSize(
return new DeleteContainerCommandHandler(1, clock, queueSize, "");
}
- static class ThreadPoolWithLockExecutor extends ThreadPoolExecutor {
- private CountDownLatch countDownLatch;
- ThreadPoolWithLockExecutor(ThreadFactory threadFactory, CountDownLatch latch) {
- super(1, 1, 0, TimeUnit.MILLISECONDS,
- new LinkedBlockingQueue<>(), threadFactory);
- this.countDownLatch = latch;
- }
-
- void setLatch(CountDownLatch latch) {
- this.countDownLatch = latch;
- }
-
- @Override
- protected void afterExecute(Runnable r, Throwable t) {
- countDownLatch.countDown();
- }
- }
}
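
The handler tests above now rely on Guava's newDirectExecutorService(), which runs each submitted task on the calling thread, so the latch-based synchronization is no longer needed. A minimal, self-contained sketch of that behavior (class and variable names are illustrative, not part of the patch):

```java
import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;

import java.util.concurrent.ExecutorService;

// Illustration only: with a direct executor the task has already run by the
// time execute(...) returns, so a test needs no CountDownLatch to wait on.
public class DirectExecutorDemo {
  public static void main(String[] args) {
    ExecutorService executor = newDirectExecutorService();
    StringBuilder order = new StringBuilder();
    order.append("before ");
    executor.execute(() -> order.append("task "));
    order.append("after");
    System.out.println(order); // prints: before task after
    executor.shutdown();
  }
}
```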
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
index 5454f9e8a9b..d04f3a5167f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.commons.io.FileUtils;
@@ -71,7 +70,6 @@ class TestChunkUtils {
private static final int BUFFER_CAPACITY = 1 << 20;
private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10;
private static final Random RANDOM = new Random();
- private static final MappedBufferManager MAPPED_BUFFER_MANAGER = new MappedBufferManager(100);
@TempDir
private File tempDir;
@@ -80,7 +78,7 @@ static ChunkBuffer readData(File file, long off, long len)
throws StorageContainerException {
LOG.info("off={}, len={}", off, len);
return ChunkUtils.readData(len, BUFFER_CAPACITY, file, off, null,
- MAPPED_BUFFER_THRESHOLD, true, MAPPED_BUFFER_MANAGER);
+ MAPPED_BUFFER_THRESHOLD);
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
index d9b95f199dd..0c373cb0dbf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
@@ -34,13 +34,8 @@
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.io.TempDir;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.io.BufferedReader;
import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.UUID;
@@ -58,8 +53,6 @@
* Helpers for ChunkManager implementation tests.
*/
public abstract class AbstractTestChunkManager {
- private static final Logger LOG =
- LoggerFactory.getLogger(AbstractTestChunkManager.class);
private HddsVolume hddsVolume;
private KeyValueContainerData keyValueContainerData;
@@ -135,55 +128,6 @@ protected void checkChunkFileCount(int expected) {
assertEquals(expected, files.length);
}
- /**
- * Helper method to check if a file is in use.
- */
- public static boolean isFileNotInUse(String filePath) {
- try {
- Process process = new ProcessBuilder("fuser", filePath).start();
- try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) {
- String output = reader.readLine(); // If fuser returns no output, the file is not in use
- if (output == null) {
- return true;
- }
- LOG.debug("File is in use: {}", filePath);
- return false;
- } finally {
- process.destroy();
- }
- } catch (IOException e) {
- LOG.warn("Failed to check if file is in use: {}", filePath, e);
- return false; // On failure, assume the file is in use
- }
- }
-
- protected boolean checkChunkFilesClosed() {
- return checkChunkFilesClosed(keyValueContainerData.getChunksPath());
- }
-
- /**
- * check that all files under chunk path are closed.
- */
- public static boolean checkChunkFilesClosed(String path) {
- //As in Setup, we try to create container, these paths should exist.
- assertNotNull(path);
-
- File dir = new File(path);
- assertTrue(dir.exists());
-
- File[] files = dir.listFiles();
- assertNotNull(files);
- for (File file : files) {
- assertTrue(file.exists());
- assertTrue(file.isFile());
- // check that the file is closed.
- if (!isFileNotInUse(file.getAbsolutePath())) {
- return false;
- }
- }
- return true;
- }
-
protected void checkWriteIOStats(long length, long opCount) {
VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
assertEquals(length, volumeIOStats.getWriteBytes());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index d4a12f577e9..47d24874749 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -27,7 +27,6 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import java.io.File;
import java.io.IOException;
@@ -40,9 +39,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.Mockito.when;
/**
* Common test cases for ChunkManager implementation tests.
@@ -225,26 +222,4 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception {
checkReadIOStats(len * count, count);
}
- @Test
- public void testFinishWrite() throws Exception {
- // GIVEN
- ChunkManager chunkManager = createTestSubject();
- checkChunkFileCount(0);
- checkWriteIOStats(0, 0);
-
- chunkManager.writeChunk(getKeyValueContainer(), getBlockID(),
- getChunkInfo(), getData(),
- WRITE_STAGE);
-
- BlockData blockData = Mockito.mock(BlockData.class);
- when(blockData.getBlockID()).thenReturn(getBlockID());
-
- chunkManager.finishWriteChunks(getKeyValueContainer(), blockData);
- assertTrue(checkChunkFilesClosed());
-
- // THEN
- checkChunkFileCount(1);
- checkWriteIOStats(getChunkInfo().getLen(), 1);
- }
-
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java
deleted file mode 100644
index 22406975986..00000000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import org.junit.jupiter.api.Test;
-
-import java.nio.ByteBuffer;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-/**
- * Test for MappedBufferManager.
- */
-public class TestMappedBufferManager {
-
- @Test
- public void testComputeIfAbsent() {
- MappedBufferManager manager = new MappedBufferManager(100);
- String file = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block";
- long position = 0;
- int size = 1024;
- ByteBuffer buffer1 = ByteBuffer.allocate(size);
- ByteBuffer buffer2 = ByteBuffer.allocate(size + 1);
- ByteBuffer byteBuffer1 = manager.computeIfAbsent(file, position, size, () -> buffer1);
- assertEquals(buffer1, byteBuffer1);
- // buffer should be reused
- String file2 = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block";
- ByteBuffer byteBuffer2 = manager.computeIfAbsent(file2, position, size, () -> buffer2);
- assertEquals(buffer1, byteBuffer2);
- }
-}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index ef37c226653..1f69db78d62 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -27,7 +27,6 @@
import java.time.Instant;
import java.time.ZoneId;
import java.util.List;
-import java.util.SortedMap;
import java.util.UUID;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.CountDownLatch;
@@ -47,8 +46,6 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority;
-import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -58,9 +55,7 @@
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo;
-import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator;
import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask;
-import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics;
import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -114,8 +109,6 @@ public class TestReplicationSupervisor {
};
private final AtomicReference<ContainerReplicator> replicatorRef =
new AtomicReference<>();
- private final AtomicReference<ECReconstructionCoordinator> ecReplicatorRef =
- new AtomicReference<>();
private ContainerSet set;
@@ -142,7 +135,6 @@ public void setUp() throws Exception {
@AfterEach
public void cleanup() {
replicatorRef.set(null);
- ecReplicatorRef.set(null);
}
@ContainerLayoutTestInfo.ContainerTest
@@ -402,107 +394,6 @@ public void taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) {
assertEquals(0, supervisor.getReplicationSuccessCount());
}
- @ContainerLayoutTestInfo.ContainerTest
- public void testMultipleReplication(ContainerLayoutVersion layout,
- @TempDir File tempFile) throws IOException {
- this.layoutVersion = layout;
- OzoneConfiguration conf = new OzoneConfiguration();
- // GIVEN
- ReplicationSupervisor replicationSupervisor =
- supervisorWithReplicator(FakeReplicator::new);
- ReplicationSupervisor ecReconstructionSupervisor = supervisorWithECReconstruction();
- ReplicationSupervisorMetrics replicationMetrics =
- ReplicationSupervisorMetrics.create(replicationSupervisor);
- ReplicationSupervisorMetrics ecReconstructionMetrics =
- ReplicationSupervisorMetrics.create(ecReconstructionSupervisor);
- try {
- //WHEN
- replicationSupervisor.addTask(createTask(1L));
- ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(2L));
- replicationSupervisor.addTask(createTask(1L));
- replicationSupervisor.addTask(createTask(3L));
- ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(4L));
-
- SimpleContainerDownloader moc = mock(SimpleContainerDownloader.class);
- Path res = Paths.get("file:/tmp/no-such-file");
- when(moc.getContainerDataFromReplicas(anyLong(), anyList(),
- any(Path.class), any())).thenReturn(res);
-
- final String testDir = tempFile.getPath();
- MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
- when(volumeSet.getVolumesList()).thenReturn(singletonList(
- new HddsVolume.Builder(testDir).conf(conf).build()));
- ContainerController mockedCC = mock(ContainerController.class);
- ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet);
- ContainerReplicator replicator = new DownloadAndImportReplicator(
- conf, set, importer, moc);
- replicatorRef.set(replicator);
- replicationSupervisor.addTask(createTask(5L));
-
- ReplicateContainerCommand cmd1 = createCommand(6L);
- cmd1.setDeadline(clock.millis() + 10000);
- ReplicationTask task1 = new ReplicationTask(cmd1, replicatorRef.get());
- clock.fastForward(15000);
- replicationSupervisor.addTask(task1);
-
- ReconstructECContainersCommand cmd2 = createReconstructionCmd(7L);
- cmd2.setDeadline(clock.millis() + 10000);
- ECReconstructionCoordinatorTask task2 = new ECReconstructionCoordinatorTask(
- ecReplicatorRef.get(), new ECReconstructionCommandInfo(cmd2));
- clock.fastForward(15000);
- ecReconstructionSupervisor.addTask(task2);
- ecReconstructionSupervisor.addTask(createECTask(8L));
- ecReconstructionSupervisor.addTask(createECTask(9L));
-
- //THEN
- assertEquals(2, replicationSupervisor.getReplicationSuccessCount());
- assertEquals(2, replicationSupervisor.getReplicationSuccessCount(
- task1.getMetricName()));
- assertEquals(1, replicationSupervisor.getReplicationFailureCount());
- assertEquals(1, replicationSupervisor.getReplicationFailureCount(
- task1.getMetricName()));
- assertEquals(1, replicationSupervisor.getReplicationSkippedCount());
- assertEquals(1, replicationSupervisor.getReplicationSkippedCount(
- task1.getMetricName()));
- assertEquals(1, replicationSupervisor.getReplicationTimeoutCount());
- assertEquals(1, replicationSupervisor.getReplicationTimeoutCount(
- task1.getMetricName()));
- assertEquals(5, replicationSupervisor.getReplicationRequestCount());
- assertEquals(5, replicationSupervisor.getReplicationRequestCount(
- task1.getMetricName()));
- assertEquals(0, replicationSupervisor.getReplicationRequestCount(
- task2.getMetricName()));
-
- assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount());
- assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount(
- task2.getMetricName()));
- assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount());
- assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount(
- task2.getMetricName()));
- assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount());
- assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount(
- task2.getMetricName()));
- assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount());
- assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount(
- task2.getMetricName()));
- assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount(
- task1.getMetricName()));
-
- MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl();
- replicationMetrics.getMetrics(replicationMetricsCollector, true);
- assertEquals(1, replicationMetricsCollector.getRecords().size());
-
- MetricsCollectorImpl ecReconstructionMetricsCollector = new MetricsCollectorImpl();
- ecReconstructionMetrics.getMetrics(ecReconstructionMetricsCollector, true);
- assertEquals(1, ecReconstructionMetricsCollector.getRecords().size());
- } finally {
- replicationMetrics.unRegister();
- ecReconstructionMetrics.unRegister();
- replicationSupervisor.stop();
- ecReconstructionSupervisor.stop();
- }
- }
-
@ContainerLayoutTestInfo.ContainerTest
public void testPriorityOrdering(ContainerLayoutVersion layout)
throws InterruptedException {
@@ -585,16 +476,6 @@ private static class BlockingTask extends AbstractReplicationTask {
this.waitForCompleteLatch = waitForCompletion;
}
- @Override
- protected String getMetricName() {
- return "Blockings";
- }
-
- @Override
- protected String getMetricDescriptionSegment() {
- return "blockings";
- }
-
@Override
public void runTask() {
runningLatch.countDown();
@@ -621,16 +502,6 @@ private static class OrderedTask extends AbstractReplicationTask {
setPriority(priority);
}
- @Override
- protected String getMetricName() {
- return "Ordereds";
- }
-
- @Override
- protected String getMetricDescriptionSegment() {
- return "ordereds";
- }
-
@Override
public void runTask() {
completeList.add(name);
@@ -660,22 +531,6 @@ private ReplicationSupervisor supervisorWith(
return supervisor;
}
- private ReplicationSupervisor supervisorWithECReconstruction() throws IOException {
- ConfigurationSource conf = new OzoneConfiguration();
- ExecutorService executor = newDirectExecutorService();
- ReplicationServer.ReplicationConfig repConf =
- conf.getObject(ReplicationServer.ReplicationConfig.class);
- ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder()
- .stateContext(context).replicationConfig(repConf).executor(executor)
- .clock(clock).build();
-
- FakeECReconstructionCoordinator coordinator = new FakeECReconstructionCoordinator(
- new OzoneConfiguration(), null, null, context,
- ECReconstructionMetrics.create(), "", supervisor);
- ecReplicatorRef.set(coordinator);
- return supervisor;
- }
-
private ReplicationTask createTask(long containerId) {
ReplicateContainerCommand cmd = createCommand(containerId);
return new ReplicationTask(cmd, replicatorRef.get());
@@ -683,13 +538,7 @@ private ReplicationTask createTask(long containerId) {
private ECReconstructionCoordinatorTask createECTask(long containerId) {
return new ECReconstructionCoordinatorTask(null,
- createReconstructionCmdInfo(containerId));
- }
-
- private ECReconstructionCoordinatorTask createECTaskWithCoordinator(long containerId) {
- ECReconstructionCommandInfo ecReconstructionCommandInfo = createReconstructionCmdInfo(containerId);
- return new ECReconstructionCoordinatorTask(ecReplicatorRef.get(),
- ecReconstructionCommandInfo);
+ createReconstructionCmd(containerId));
}
private static ReplicateContainerCommand createCommand(long containerId) {
@@ -699,20 +548,18 @@ private static ReplicateContainerCommand createCommand(long containerId) {
return cmd;
}
- private static ECReconstructionCommandInfo createReconstructionCmdInfo(
+ private static ECReconstructionCommandInfo createReconstructionCmd(
long containerId) {
- return new ECReconstructionCommandInfo(createReconstructionCmd(containerId));
- }
-
- private static ReconstructECContainersCommand createReconstructionCmd(
- long containerId) {
- List<ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex> sources =
- new ArrayList<>();
- sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex(
- MockDatanodeDetails.randomDatanodeDetails(), 1));
- sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex(
+ List<ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex> sources
+ = new ArrayList<>();
+ sources.add(new ReconstructECContainersCommand
+ .DatanodeDetailsAndReplicaIndex(
+ MockDatanodeDetails.randomDatanodeDetails(), 1));
+ sources.add(new ReconstructECContainersCommand
+ .DatanodeDetailsAndReplicaIndex(
MockDatanodeDetails.randomDatanodeDetails(), 2));
- sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex(
+ sources.add(new ReconstructECContainersCommand
+ .DatanodeDetailsAndReplicaIndex(
MockDatanodeDetails.randomDatanodeDetails(), 3));
byte[] missingIndexes = new byte[1];
@@ -720,44 +567,14 @@ private static ReconstructECContainersCommand createReconstructionCmd(
List<DatanodeDetails> target = singletonList(
MockDatanodeDetails.randomDatanodeDetails());
- ReconstructECContainersCommand cmd = new ReconstructECContainersCommand(containerId, sources, target,
- Proto2Utils.unsafeByteString(missingIndexes),
- new ECReplicationConfig(3, 2));
- cmd.setTerm(CURRENT_TERM);
- return cmd;
- }
-
- /**
- * A fake coordinator that simulates successful reconstruction of ec containers.
- */
- private class FakeECReconstructionCoordinator extends ECReconstructionCoordinator {
-
- private final OzoneConfiguration conf = new OzoneConfiguration();
- private final ReplicationSupervisor supervisor;
-
- FakeECReconstructionCoordinator(ConfigurationSource conf,
- CertificateClient certificateClient, SecretKeySignerClient secretKeyClient,
- StateContext context, ECReconstructionMetrics metrics, String threadNamePrefix,
- ReplicationSupervisor supervisor)
- throws IOException {
- super(conf, certificateClient, secretKeyClient, context, metrics, threadNamePrefix);
- this.supervisor = supervisor;
- }
-
- @Override
- public void reconstructECContainerGroup(long containerID,
- ECReplicationConfig repConfig, SortedMap<Integer, DatanodeDetails> sourceNodeMap,
- SortedMap<Integer, DatanodeDetails> targetNodeMap) {
- assertEquals(1, supervisor.getTotalInFlightReplications());
-
- KeyValueContainerData kvcd = new KeyValueContainerData(
- containerID, layoutVersion, 100L,
- UUID.randomUUID().toString(), UUID.randomUUID().toString());
- KeyValueContainer kvc = new KeyValueContainer(kvcd, conf);
- assertDoesNotThrow(() -> {
- set.addContainer(kvc);
- });
- }
+ ReconstructECContainersCommand cmd =
+ new ReconstructECContainersCommand(containerId,
+ sources,
+ target,
+ Proto2Utils.unsafeByteString(missingIndexes),
+ new ECReplicationConfig(3, 2));
+
+ return new ECReconstructionCommandInfo(cmd);
}
/**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
index 519a24a2a5c..f4e4ec6a253 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
@@ -26,12 +26,10 @@
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
-import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -55,8 +53,11 @@ public void testExceptionIfSourceAndMissingNotSameLength() {
@Test
public void protobufConversion() {
- byte[] missingIndexes = {1, 2};
- final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes);
+ final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2});
+ List<Long> srcNodesIndexes = new ArrayList<>();
+ for (int i = 0; i < srcNodesIndexes.size(); i++) {
+ srcNodesIndexes.add(i + 1L);
+ }
ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2);
final List<DatanodeDetails> dnDetails = getDNDetails(5);
@@ -69,10 +70,6 @@ public void protobufConversion() {
ReconstructECContainersCommand reconstructECContainersCommand =
new ReconstructECContainersCommand(1L, sources, targets,
missingContainerIndexes, ecReplicationConfig);
-
- assertThat(reconstructECContainersCommand.toString())
- .contains("missingIndexes: " + Arrays.toString(missingIndexes));
-
StorageContainerDatanodeProtocolProtos.ReconstructECContainersCommandProto
proto = reconstructECContainersCommand.getProto();
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index 288085ef948..3a69c793c26 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -88,7 +88,6 @@
-
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md
index cf246712f68..47c09a798fc 100644
--- a/hadoop-hdds/docs/content/concept/Datanodes.md
+++ b/hadoop-hdds/docs/content/concept/Datanodes.md
@@ -76,15 +76,3 @@ blocks that get reported. That is a 40x reduction in the block reports.
This extra indirection helps tremendously with scaling Ozone. SCM has far
less block data to process, and keeping the namespace service (Ozone Manager)
as a separate service is critical to scaling Ozone.
-
-
-## Notable configurations
-
-key | default | description
-----|---------|------------
-dfs.container.ratis.datanode.storage.dir | none | This directory is used for storing Ratis metadata like logs.
-ozone.scm.datanode.id.dir | none | The path that datanodes will use to store the datanode ID.
-hdds.datanode.dir | none | Determines where HDDS data will be stored on the local filesystem.
-hdds.datanode.dir.du.reserved | none | Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-ozone.metadata.dirs | none | Directory to store persisted data (RocksDB).
-ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. Use to connect Recon.
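
The removed table above documents datanode-related configuration keys. A hedged sketch of how such keys can be set programmatically on an OzoneConfiguration (the paths and host below are placeholders, not recommended values):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// Illustration only: populating the keys from the table above in code.
public class DatanodeConfExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("dfs.container.ratis.datanode.storage.dir", "/data/ratis"); // Ratis metadata and logs
    conf.set("ozone.scm.datanode.id.dir", "/data/meta");                 // datanode ID location
    conf.set("hdds.datanode.dir", "/data/hdds");                         // HDDS data location
    conf.set("ozone.metadata.dirs", "/data/metadata");                   // persisted RocksDB data
    conf.set("ozone.recon.address", "recon-host:9891");                  // Recon RPC address
    System.out.println(conf.get("hdds.datanode.dir"));
  }
}
```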
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.zh.md b/hadoop-hdds/docs/content/concept/Datanodes.zh.md
index 32071c9e51e..8f129df7b9b 100644
--- a/hadoop-hdds/docs/content/concept/Datanodes.zh.md
+++ b/hadoop-hdds/docs/content/concept/Datanodes.zh.md
@@ -49,15 +49,3 @@ Ozone's storage containers are self-contained super blocks that hold a series of
How does SCM learn where containers are located? This works much like existing HDFS: datanodes periodically send container reports, similar to block reports, but far more compact. For a cluster with 196 TB of storage, Ozone holds roughly forty thousand containers, compared with about 1.5 million blocks in HDFS, a 40x reduction in report volume.
This indirection greatly improves Ozone's scalability, because SCM has far less block data to process, and having the namespace service (OM) as a separate service is important for scaling Ozone.
-
-
-## Notable configurations
-
-key | default | description
-----|---------|------------
-dfs.container.ratis.datanode.storage.dir | none | This directory is used for storing Ratis metadata such as logs.
-ozone.scm.datanode.id.dir | none | The path that datanodes use to store the datanode ID.
-hdds.datanode.dir | none | Determines where HDDS data will be stored on the local filesystem.
-hdds.datanode.dir.du.reserved | none | Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-ozone.metadata.dirs | none | Directory to store persisted data (RocksDB).
-ozone.recon.address | 0.0.0.0:9891 | RPC address of Recon, used to connect to Recon.
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java
index fde555208b3..5b283c3a1a3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java
@@ -31,6 +31,23 @@
@ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "."
+ GrpcConfigKeys.PREFIX)
public class DatanodeRatisGrpcConfig {
+ @Config(key = "message.size.max",
+ defaultValue = "32MB",
+ type = ConfigType.SIZE,
+ tags = {OZONE, CLIENT, PERFORMANCE},
+ description = "Maximum message size allowed to be received by Grpc " +
+ "Channel (Server)."
+ )
+ private int maximumMessageSize = 32 * 1024 * 1024;
+
+ public int getMaximumMessageSize() {
+ return maximumMessageSize;
+ }
+
+ public void setMaximumMessageSize(int maximumMessageSize) {
+ this.maximumMessageSize = maximumMessageSize;
+ }
+
@Config(key = "flow.control.window",
defaultValue = "5MB",
type = ConfigType.SIZE,
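
The hunk above adds a message.size.max entry to DatanodeRatisGrpcConfig. A minimal sketch of how a @Config-backed value like this is typically read through Ozone's typed configuration API (assumes an OzoneConfiguration is at hand; the fully resolved key name comes from the @ConfigGroup prefix shown above):

```java
import org.apache.hadoop.hdds.conf.DatanodeRatisGrpcConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// Illustration only: reading the typed config object and its new field.
public class GrpcConfigReadExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    DatanodeRatisGrpcConfig grpcConfig = conf.getObject(DatanodeRatisGrpcConfig.class);
    // Defaults to 32MB unless overridden via the prefixed message.size.max key.
    System.out.println("gRPC max message size: " + grpcConfig.getMaximumMessageSize());
  }
}
```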
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
index 0cb39482e98..cbb4f3fc2ee 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
@@ -118,6 +118,7 @@ String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails,
/**
* Get Root CA certificate.
+ * @return
* @throws IOException
*/
String getRootCACertificate() throws IOException;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index 71918308f14..a938d53c7c4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -330,6 +330,7 @@ public SCMGetCertResponseProto getCACert() throws IOException {
* @param role - node type: OM/SCM/DN.
* @param startSerialId - start cert serial id.
* @param count - max number of certificates returned in a batch.
+ * @return
* @throws IOException
*/
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index d9b198d4b14..1f114304cca 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -307,7 +307,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
}
/**
* Sort the datanodes based on distance from client.
- * @return list of datanodes;
+ * @return List>
* @throws IOException
*/
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java
index da651160d04..e7e029f7087 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java
@@ -50,7 +50,8 @@ void verify(Token<?> token,
ContainerCommandRequestProtoOrBuilder cmd)
throws SCMSecurityException;
- /** Same as {@link #verify}, but with encoded token. */
+ /** Same as {@link #verify(Token,
+ * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. */
default void verify(ContainerCommandRequestProtoOrBuilder cmd,
String encodedToken) throws SCMSecurityException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
index 5a39d0f1dd0..b2d62443b77 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
@@ -104,7 +104,7 @@ public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) {
* @param certSerialId - the new certificate id.
* @return Signed Certificate.
* @throws IOException - On Error
- * @throws CertificateException - on Error.
+ * @throws OperatorCreationException - on Error.
*/
@SuppressWarnings("ParameterNumber")
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index 118aa826013..a93bdb4e3d6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -195,6 +195,8 @@ public CertPath getCaCertPath()
*
* @param certSerialId - Certificate for this CA.
* @return X509Certificate
+ * @throws CertificateException - usually thrown if this CA is not
+ * initialized.
* @throws IOException - on Error.
*/
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index 42292b9663f..70a475982bd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -73,7 +73,6 @@
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager;
import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager;
@@ -668,8 +667,6 @@ protected enum InitCase {
* certificate.
*
* Truth table:
- *
- * {@code
* +--------------+---------------+--------------+---------------------+
* | Private Key | Public Keys | Certificate | Result |
* +--------------+---------------+--------------+---------------------+
@@ -682,8 +679,7 @@ protected enum InitCase {
* | True (1) | True (1) | False (0) | GETCERT->SUCCESS |
* | True (1) | True (1) | True (1) | SUCCESS |
* +--------------+-----------------+--------------+----------------+
- * }
- *
+ *
* Success in following cases:
* 1. If keypair as well certificate is available.
* 2. If private key and certificate is available and public key is
@@ -987,6 +983,43 @@ public Set<X509Certificate> getAllCaCerts() {
return certs;
}
+ @Override
+ public List<String> getCAList() {
+ pemEncodedCACertsLock.lock();
+ try {
+ return pemEncodedCACerts;
+ } finally {
+ pemEncodedCACertsLock.unlock();
+ }
+ }
+
+ public List<String> listCA() throws IOException {
+ pemEncodedCACertsLock.lock();
+ try {
+ if (pemEncodedCACerts == null) {
+ updateCAList();
+ }
+ return pemEncodedCACerts;
+ } finally {
+ pemEncodedCACertsLock.unlock();
+ }
+ }
+
+ @Override
+ public List<String> updateCAList() throws IOException {
+ pemEncodedCACertsLock.lock();
+ try {
+ pemEncodedCACerts = getScmSecureClient().listCACertificate();
+ return pemEncodedCACerts;
+ } catch (Exception e) {
+ getLogger().error("Error during updating CA list", e);
+ throw new CertificateException("Error during updating CA list", e,
+ CERTIFICATE_ERROR);
+ } finally {
+ pemEncodedCACertsLock.unlock();
+ }
+ }
+
@Override
public ReloadingX509TrustManager getTrustManager() throws CertificateException {
try {
@@ -1016,20 +1049,8 @@ public ReloadingX509KeyManager getKeyManager() throws CertificateException {
}
}
- @Override
- public ClientTrustManager createClientTrustManager() throws IOException {
- CACertificateProvider caCertificateProvider = () -> {
- List<X509Certificate> caCerts = new ArrayList<>();
- caCerts.addAll(getAllCaCerts());
- caCerts.addAll(getAllRootCaCerts());
- return caCerts;
- };
- return new ClientTrustManager(caCertificateProvider, caCertificateProvider);
- }
-
/**
* Register a receiver that will be called after the certificate renewed.
- *
* @param receiver
*/
@Override
@@ -1086,7 +1107,7 @@ public Duration timeBeforeExpiryGracePeriod(X509Certificate certificate) {
* Renew keys and certificate. Save the keys are certificate to disk in new
* directories, swap the current key directory and certs directory with the
* new directories.
- * @param force check certificate expiry time again if force is false.
+ * @param force, check certificate expiry time again if force is false.
* @return String, new certificate ID
* */
public String renewAndStoreKeyAndCertificate(boolean force)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 9d037fed6bc..f27f42e0b4c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -123,8 +123,8 @@
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
* to serve up status information for the server. There are three contexts:
- * "/logs/" -> points to the log directory "/static/" -> points to common static
- * files (src/webapps/static) "/" -> the jsp server code from
+ * "/logs/" -> points to the log directory "/static/" -> points to common static
+ * files (src/webapps/static) "/" -> the jsp server code from
* (src/webapps/)
*
* This class is a fork of the old HttpServer. HttpServer exists for
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
index bceec92c6c8..f4f188aaf39 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
@@ -41,12 +41,11 @@
import org.slf4j.LoggerFactory;
/**
- *
* Servlet that runs async-profiler as web-endpoint.
- *
+ *
* Source: https://github.com/apache/hive/blob/master/common/src/java/org
* /apache/hive/http/ProfileServlet.java
- *
+ *
* Following options from async-profiler can be specified as query parameter.
* // -e event profiling event: cpu|alloc|lock|cache-misses etc.
* // -d duration run profiling for seconds
@@ -80,7 +79,7 @@
* curl "http://localhost:10002/prof"
* - To collect 1 minute CPU profile of current process and output in tree
* format (html)
- * curl "http://localhost:10002/prof?output=tree&duration=60"
+ * curl "http://localhost:10002/prof?output=tree&duration=60"
* - To collect 30 second heap allocation profile of current process (returns
* FlameGraph svg)
* curl "http://localhost:10002/prof?event=alloc"
@@ -112,7 +111,6 @@
* The default output format of the newest async profiler is HTML.
* If the user is using an older version such as 1.5, HTML is not supported.
* Please specify the corresponding output format.
- *
*/
public class ProfileServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java
index 535a5e6c8e9..0d01aa43b42 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java
@@ -24,6 +24,8 @@
import java.io.IOException;
import java.io.PrintWriter;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.exporter.common.TextFormat;
@@ -56,6 +58,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
return;
}
}
+ DefaultMetricsSystem.instance().publishMetricsNow();
PrintWriter writer = resp.getWriter();
getPrometheusSink().writeMetrics(writer);
writer.write("\n\n#Dropwizard metrics\n\n");
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index 0dc244bdbc7..342a0400cbd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -35,6 +35,8 @@
import org.apache.hadoop.hdds.scm.proxy.SCMClientConfig;
import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
@@ -371,6 +373,80 @@ public static List<String> getExistingSstFiles(File db) throws IOException {
return sstList;
}
+ /**
+ * Build CA list which need to be passed to client.
+ *
+ * If certificate client is null, obtain the list of CA using SCM security
+ * client, else it uses certificate client.
+ * @return list of CA
+ */
+ public static List<String> buildCAList(CertificateClient certClient,
+ ConfigurationSource configuration) throws IOException {
+ long waitDuration =
+ configuration.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL,
+ OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS);
+ if (certClient != null) {
+ if (!SCMHAUtils.isSCMHAEnabled(configuration)) {
+ return generateCAList(certClient);
+ } else {
+ Collection<String> scmNodes = SCMHAUtils.getSCMNodeIds(configuration);
+ int expectedCount = scmNodes.size() + 1;
+ if (scmNodes.size() > 1) {
+ // First check if cert client has ca list initialized.
+ // This is being done, when this method is called multiple times we
+ // don't make call to SCM, we return from in-memory.
+ List<String> caCertPemList = certClient.getCAList();
+ if (caCertPemList != null && caCertPemList.size() == expectedCount) {
+ return caCertPemList;
+ }
+ return getCAListWithRetry(() ->
+ waitForCACerts(certClient::updateCAList, expectedCount),
+ waitDuration);
+ } else {
+ return generateCAList(certClient);
+ }
+ }
+ } else {
+ SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient =
+ HddsServerUtil.getScmSecurityClient(configuration);
+ if (!SCMHAUtils.isSCMHAEnabled(configuration)) {
+ List<String> caCertPemList = new ArrayList<>();
+ SCMGetCertResponseProto scmGetCertResponseProto =
+ scmSecurityProtocolClient.getCACert();
+ if (scmGetCertResponseProto.hasX509Certificate()) {
+ caCertPemList.add(scmGetCertResponseProto.getX509Certificate());
+ }
+ if (scmGetCertResponseProto.hasX509RootCACertificate()) {
+ caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate());
+ }
+ return caCertPemList;
+ } else {
+ Collection<String> scmNodes = SCMHAUtils.getSCMNodeIds(configuration);
+ int expectedCount = scmNodes.size() + 1;
+ if (scmNodes.size() > 1) {
+ return getCAListWithRetry(() -> waitForCACerts(
+ scmSecurityProtocolClient::listCACertificate,
+ expectedCount), waitDuration);
+ } else {
+ return scmSecurityProtocolClient.listCACertificate();
+ }
+ }
+ }
+ }
+
+ private static List<String> generateCAList(CertificateClient certClient)
+ throws IOException {
+ List<String> caCertPemList = new ArrayList<>();
+ for (X509Certificate cert : certClient.getAllRootCaCerts()) {
+ caCertPemList.add(CertificateCodec.getPEMEncodedString(cert));
+ }
+ for (X509Certificate cert : certClient.getAllCaCerts()) {
+ caCertPemList.add(CertificateCodec.getPEMEncodedString(cert));
+ }
+ return caCertPemList;
+ }
+
+
/**
* Retry forever until CA list matches expected count.
* @param task - task to get CA list.
@@ -412,37 +488,23 @@ private static List<String> waitForCACerts(
* Build CA List in the format of X509Certificate.
* If certificate client is null, obtain the list of CA using SCM
* security client, else it uses certificate client.
- *
* @return list of CA X509Certificates.
*/
- public static List<X509Certificate> buildCAX509List(ConfigurationSource conf) throws IOException {
- long waitDuration =
- conf.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL,
- OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS);
- Collection<String> scmNodes = SCMHAUtils.getSCMNodeIds(conf);
- SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient =
- HddsServerUtil.getScmSecurityClient(conf);
- if (!SCMHAUtils.isSCMHAEnabled(conf)) {
- List<String> caCertPemList = new ArrayList<>();
- SCMGetCertResponseProto scmGetCertResponseProto =
- scmSecurityProtocolClient.getCACert();
- if (scmGetCertResponseProto.hasX509Certificate()) {
- caCertPemList.add(scmGetCertResponseProto.getX509Certificate());
- }
- if (scmGetCertResponseProto.hasX509RootCACertificate()) {
- caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate());
- }
- return OzoneSecurityUtil.convertToX509(caCertPemList);
- } else {
- int expectedCount = scmNodes.size() + 1;
- if (scmNodes.size() > 1) {
- return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts(
- scmSecurityProtocolClient::listCACertificate,
- expectedCount), waitDuration));
- } else {
- return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate());
+ public static List<X509Certificate> buildCAX509List(
+ CertificateClient certClient,
+ ConfigurationSource conf) throws IOException {
+ if (certClient != null) {
+ // Do this here to avoid extra conversion of X509 to pem and again to
+ // X509 by buildCAList.
+ if (!SCMHAUtils.isSCMHAEnabled(conf)) {
+ List<X509Certificate> x509Certificates = new ArrayList<>();
+ x509Certificates.addAll(certClient.getAllCaCerts());
+ x509Certificates.addAll(certClient.getAllRootCaCerts());
+ return x509Certificates;
}
}
+ List<String> pemEncodedCerts = HAUtils.buildCAList(certClient, conf);
+ return OzoneSecurityUtil.convertToX509(pemEncodedCerts);
}
}
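
The buildCAList logic added above follows a cache-then-refresh pattern: serve the certificate client's in-memory list when it already holds one certificate per SCM plus the root CA, otherwise refresh from SCM (with retry in the HA case). A condensed, self-contained sketch of that pattern; CASource and CaListCache are illustrative stand-ins, not Ozone types:

```java
import java.io.IOException;
import java.util.List;

// Illustration only: mirrors the cache-then-refresh decision in buildCAList.
final class CaListCache {

  interface CASource {
    List<String> cached();                      // e.g. certClient.getCAList(); may be null or incomplete
    List<String> refresh() throws IOException;  // e.g. certClient.updateCAList()
  }

  static List<String> caList(CASource source, int expectedCount) throws IOException {
    List<String> cached = source.cached();
    if (cached != null && cached.size() == expectedCount) {
      return cached;          // complete list already in memory, skip the SCM round trip
    }
    return source.refresh();  // otherwise fetch from SCM (Ozone retries until complete)
  }
}
```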
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 94e9dceb6a7..c45e772c241 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -125,11 +125,11 @@ private HddsServerUtil() {
HddsServerUtil.class);
/**
- * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}.
+ * Add protobuf-based protocol to the {@link RPC.Server}.
* @param conf configuration
* @param protocol Protocol interface
* @param service service that implements the protocol
- * @param server RPC server to which the protocol and implementation is added to
+ * @param server RPC server to which the protocol & implementation is added to
*/
public static void addPBProtocol(Configuration conf, Class<?> protocol,
BlockingService service, RPC.Server server) throws IOException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index 29531f31518..e7c4ec4ce3d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -24,7 +24,6 @@
import java.util.List;
import java.util.Objects;
-import com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.StringCodec;
@@ -163,15 +162,7 @@ public String toString() {
*/
public static TransactionInfo readTransactionInfo(
DBStoreHAManager metadataManager) throws IOException {
- return metadataManager.getTransactionInfoTable().getSkipCache(TRANSACTION_INFO_KEY);
- }
-
- public ByteString toByteString() throws IOException {
- return ByteString.copyFrom(getCodec().toPersistedFormat(this));
- }
-
- public static TransactionInfo fromByteString(ByteString byteString) throws IOException {
- return byteString == null ? null : getCodec().fromPersistedFormat(byteString.toByteArray());
+ return metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
}
public SnapshotInfo toSnapshotInfo() {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index 8623a3bdd7d..3e8ea30a652 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -184,7 +184,7 @@ void move(KEY sourceKey, KEY destKey, VALUE value,
/**
* Get List of Index to Table Names.
* (For decoding table from column family index)
- * @return Map of Index -> TableName
+ * @return Map of Index -> TableName
*/
Map<Integer, String> getTableNames();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
index 015cd10b8b9..c47b176e93b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
@@ -49,7 +49,7 @@ public static boolean waitForCheckpointDirectoryExist(File file,
final boolean success = RatisHelper.attemptUntilTrue(file::exists, POLL_INTERVAL_DURATION, maxWaitTimeout);
if (!success) {
LOG.info("Checkpoint directory: {} didn't get created in {} secs.",
- file.getAbsolutePath(), maxWaitTimeout.getSeconds());
+ maxWaitTimeout.getSeconds(), file.getAbsolutePath());
}
return success;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index c156b8e4d67..c441ec929c7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -841,7 +841,7 @@ private int getLastLevel() throws IOException {
/**
* Deletes sst files which do not correspond to prefix
* for given table.
- * @param prefixPairs a map of TableName to prefixUsed.
+ * @param prefixPairs, a map of TableName to prefixUsed.
*/
public void deleteFilesNotMatchingPrefix(Map<String, String> prefixPairs)
throws IOException, RocksDBException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index c7055267052..c818c07b1ac 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -24,7 +24,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
@@ -170,7 +169,7 @@ default VALUE getReadCopy(KEY key) throws IOException {
/**
* Returns a prefixed iterator for this metadata store.
* @param prefix
- * @return MetaStoreIterator
+ * @return
*/
TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator(KEY prefix)
throws IOException;
@@ -246,7 +245,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException {
/**
* Returns a certain range of key value pairs as a list based on a
- * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}
+ * startKey or count. Further a {@link MetadataKeyFilters.MetadataKeyFilter}
* can be added to * filter keys if necessary.
* To prevent race conditions while listing
* entries, this implementation takes a snapshot and lists the entries from
@@ -262,7 +261,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException {
* the value for count must be an integer greater than 0.
*
* This method allows to specify one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}
+ * {@link MetadataKeyFilters.MetadataKeyFilter}
* to filter keys by certain condition. Once given, only the entries
* whose key passes all the filters will be included in the result.
*
@@ -270,7 +269,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException {
* @param count max number of entries to return.
* @param prefix fixed key schema specific prefix
* @param filters customized one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
+ * {@link MetadataKeyFilters.MetadataKeyFilter}.
* @return a list of entries found in the database or an empty list if the
* startKey is invalid.
* @throws IOException if there are I/O errors.
@@ -293,7 +292,7 @@ List<? extends KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey,
* @param count max number of entries to return.
* @param prefix fixed key schema specific prefix
* @param filters customized one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
+ * {@link MetadataKeyFilters.MetadataKeyFilter}.
* @return a list of entries found in the database.
* @throws IOException
* @throws IllegalArgumentException
@@ -308,6 +307,7 @@ List<? extends KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey,
* as part of a batch operation.
* @param batch
* @param prefix
+ * @return
*/
void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
throws IOException;
@@ -354,24 +354,6 @@ public V getValue() {
public String toString() {
return "(key=" + key + ", value=" + value + ")";
}
-
- @Override
- public boolean equals(Object obj) {
- if (!(obj instanceof KeyValue)) {
- return false;
- }
- KeyValue<?, ?> kv = (KeyValue<?, ?>) obj;
- try {
- return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue());
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(getKey(), getValue());
- }
};
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
index c428f2860ee..0c1ec710d2c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -48,7 +48,7 @@
* This interface must be implemented by entities requiring audit logging.
* For example - OMVolumeArgs, OMBucketArgs.
* The implementing class must override toAuditMap() to return an
- * instance of {@code Map<String, String>} where both Key and Value are String.
+ * instance of Map<String, String> where both Key and Value are String.
*
* Key: must contain printable US ASCII characters
* May not contain a space, =, ], or "
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
index 389d9d78f21..e08e9c52060 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
@@ -91,7 +91,3 @@ body {
.om-roles-background {
background-color: #dcfbcd!important;
}
-
-.scm-roles-background {
- background-color: #dcfbcd!important;
-}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
index 9706ebdf6b3..c1f7d16aefa 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
@@ -21,6 +21,6 @@
Input arguments: |
- {{$ctrl.jmx.InputArguments.join('\n')}} |
+ {{$ctrl.jmx.InputArguments}} |
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java
index 94ef86650c4..fa784b75538 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java
@@ -48,7 +48,6 @@
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager;
@@ -258,6 +257,16 @@ public Set<X509Certificate> getAllCaCerts() {
return rootCerts;
}
+ @Override
+ public List<String> getCAList() {
+ return null;
+ }
+
+ @Override
+ public List<String> updateCAList() throws IOException {
+ return null;
+ }
+
public void renewRootCA() throws Exception {
LocalDateTime start = LocalDateTime.now();
Duration rootCACertDuration = securityConfig.getMaxCertificateDuration();
@@ -355,17 +364,6 @@ public ReloadingX509TrustManager getTrustManager() throws CertificateException {
}
}
- @Override
- public ClientTrustManager createClientTrustManager() throws IOException {
- CACertificateProvider caCertificateProvider = () -> {
- List<X509Certificate> caCerts = new ArrayList<>();
- caCerts.addAll(getAllCaCerts());
- caCerts.addAll(getAllRootCaCerts());
- return caCerts;
- };
- return new ClientTrustManager(caCertificateProvider, caCertificateProvider);
- }
-
@Override
public void registerNotificationReceiver(CertificateNotification receiver) {
synchronized (notificationReceivers) {
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index e25d85e1957..ee5c0d9cc5a 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -191,7 +191,6 @@ message DatanodeUsageInfoProto {
optional int64 containerCount = 5;
optional int64 committed = 6;
optional int64 freeSpaceToSpare = 7;
- optional int64 pipelineCount = 8;
}
/**
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 87d76158301..a863fe3ef5d 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -247,30 +247,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-
- org.apache.maven.plugins
- maven-remote-resources-plugin
-
-
- org.apache.ozone:ozone-dev-support:${ozone.version}
-
-
-
-
- org.apache.ozone
- ozone-dev-support
- ${ozone.version}
-
-
-
-
-
- process
-
-
-
-
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java
index d93933dee36..ce424c930e1 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java
@@ -36,8 +36,6 @@
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
-import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
-
/**
* Class to load Native Libraries.
*/
@@ -69,10 +67,6 @@ public static NativeLibraryLoader getInstance() {
return instance;
}
- public static String getJniLibraryFileName() {
- return appendLibOsSuffix("lib" + ROCKS_TOOLS_NATIVE_LIBRARY_NAME);
- }
-
public static String getJniLibraryFileName(String libraryName) {
return appendLibOsSuffix("lib" + libraryName);
}
@@ -105,12 +99,9 @@ private static String appendLibOsSuffix(String libraryFileName) {
return libraryFileName + getLibOsSuffix();
}
- public static boolean isLibraryLoaded() {
- return isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME);
- }
-
public static boolean isLibraryLoaded(final String libraryName) {
- return getInstance().librariesLoaded.getOrDefault(libraryName, false);
+ return getInstance().librariesLoaded
+ .getOrDefault(libraryName, false);
}
public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) {
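The getJniLibraryFileName/appendLibOsSuffix pair above maps a logical library name to a platform-specific file name. A rough sketch of that mapping is below; the per-OS suffixes follow the usual shared-library conventions and are an assumption rather than a copy of the Ozone implementation, and the library name in main() is only an example.

```java
final class JniLibraryNameSketch {
  static String getJniLibraryFileName(String libraryName) {
    return "lib" + libraryName + getLibOsSuffix();
  }

  private static String getLibOsSuffix() {
    String os = System.getProperty("os.name").toLowerCase();
    if (os.contains("mac")) {
      return ".dylib";
    } else if (os.contains("win")) {
      return ".dll";
    }
    return ".so";  // Linux and other Unix-like systems
  }

  public static void main(String[] args) {
    System.out.println(getJniLibraryFileName("example_native_lib"));
  }
}
```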
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java
index 05eb32722e7..a792e2cea6b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java
@@ -73,7 +73,7 @@ ContainerPlacementStatus validateContainerPlacement(
* Given a set of replicas of a container which are
* neither over underreplicated nor overreplicated,
* return a set of replicas to copy to another node to fix misreplication.
- * @param replicas Map of replicas with value signifying if
+ * @param replicas: Map of replicas with value signifying if
* replica can be copied
*/
Set<ContainerReplica> replicasToCopyToFixMisreplication(
@@ -82,8 +82,8 @@ Set replicasToCopyToFixMisreplication(
/**
* Given a set of replicas of a container which are overreplicated,
* return a set of replicas to delete to fix overreplication.
- * @param replicas Set of existing replicas of the container
- * @param expectedCountPerUniqueReplica Replication factor of each
+ * @param replicas: Set of existing replicas of the container
+ * @param expectedCountPerUniqueReplica: Replication factor of each
* unique replica
*/
Set<ContainerReplica> replicasToRemoveToFixOverreplication(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 2a1c6fce0c0..471a9479412 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -525,7 +525,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails,
* Given a set of replicas of a container which are
* neither over underreplicated nor overreplicated,
* return a set of replicas to copy to another node to fix misreplication.
- * @param replicas Map of replicas with value signifying if
+ * @param replicas: Map of replicas with value signifying if
* replica can be copied
*/
@Override
@@ -582,7 +582,7 @@ protected Node getPlacementGroup(DatanodeDetails dn) {
* replication is computed.
* The algorithm starts with creating a replicaIdMap which contains the
* replicas grouped by replica Index. A placementGroup Map is created which
- * groups replicas based on their rack and the replicas within the rack
+ * groups replicas based on their rack & the replicas within the rack
* are further grouped based on the replica Index.
* A placement Group Count Map is created which keeps
* track of the count of replicas in each rack.
@@ -590,13 +590,13 @@ protected Node getPlacementGroup(DatanodeDetails dn) {
* order based on their current replication factor in a descending factor.
* For each replica Index the replica is removed from the rack which contains
* the most replicas, in order to achieve this the racks are put
- * into priority queue and are based on the number of replicas they have.
- * The replica is removed from the rack with maximum replicas and the replica
- * to be removed is also removed from the maps created above and
+ * into priority queue & are based on the number of replicas they have.
+ * The replica is removed from the rack with maximum replicas & the replica
+ * to be removed is also removed from the maps created above &
* the count for rack is reduced.
* The set of replicas computed are then returned by the function.
- * @param replicas Set of existing replicas of the container
- * @param expectedCountPerUniqueReplica Replication factor of each
+ * @param replicas: Set of existing replicas of the container
+ * @param expectedCountPerUniqueReplica: Replication factor of each
* * unique replica
* @return Set of replicas to be removed are computed.
*/
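The javadoc above describes the over-replication fix: group replicas by replica index, then repeatedly drop a replica from whichever rack currently holds the most replicas until each index is back at the expected count. A compilable Java 17 sketch of that idea follows; the Replica record and the max-scan (standing in for the priority queue) are simplified stand-ins, not the real Ozone types.

```java
import java.util.*;

final class OverReplicationSketch {
  record Replica(String id, int replicaIndex, String rack) { }

  static Set<Replica> replicasToRemove(Collection<Replica> replicas, int expected) {
    Map<Integer, List<Replica>> byIndex = new HashMap<>();
    Map<String, Integer> rackCount = new HashMap<>();
    for (Replica r : replicas) {
      byIndex.computeIfAbsent(r.replicaIndex(), k -> new ArrayList<>()).add(r);
      rackCount.merge(r.rack(), 1, Integer::sum);
    }
    Set<Replica> toRemove = new HashSet<>();
    for (List<Replica> group : byIndex.values()) {
      List<Replica> remaining = new ArrayList<>(group);
      while (remaining.size() > expected) {
        // pick the replica whose rack currently holds the most replicas
        Replica victim = remaining.stream()
            .max(Comparator.comparingInt(r -> rackCount.get(r.rack())))
            .orElseThrow();
        remaining.remove(victim);
        toRemove.add(victim);
        rackCount.merge(victim.rack(), -1, Integer::sum);   // that rack now holds one fewer
      }
    }
    return toRemove;
  }

  public static void main(String[] args) {
    Set<Replica> removed = replicasToRemove(List.of(
        new Replica("a", 1, "rack1"),
        new Replica("b", 1, "rack1"),
        new Replica("c", 1, "rack2")), 1);
    System.out.println(removed);   // two replicas removed, starting from rack1
  }
}
```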
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 5ec68c78d74..45d53c0ef2c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -57,7 +57,7 @@ DatanodeDeletedBlockTransactions getTransactions(
* considered to be failed if it has been sent more than MAX_RETRY limit
* and its count is reset to -1.
*
- * @param count Maximum num of returned transactions, if < 0. return all.
+ * @param count Maximum num of returned transactions, if < 0. return all.
* @param startTxId The least transaction id to start with.
* @return a list of failed deleted block transactions.
* @throws IOException
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 6b6a888f424..3eba240533e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -61,9 +61,9 @@ default List<ContainerInfo> getContainers() {
* The max size of the searching range cannot exceed the
* value of count.
*
- * @param startID start containerID, >=0,
+ * @param startID start containerID, >=0,
* start searching at the head if 0.
- * @param count count must be >= 0
+ * @param count count must be >= 0
* Usually the count will be replace with a very big
* value instead of being unlimited in case the db is very big.
*
@@ -85,9 +85,9 @@ default List<ContainerInfo> getContainers() {
* The max size of the searching range cannot exceed the
* value of count.
*
- * @param startID start containerID, >=0,
+ * @param startID start containerID, >=0,
* start searching at the head if 0.
- * @param count count must be >= 0
+ * @param count count must be >= 0
* Usually the count will be replace with a very big
* value instead of being unlimited in case the db is very big.
* @param state container state
@@ -164,6 +164,7 @@ void updateContainerReplica(ContainerID containerID, ContainerReplica replica)
*
* @param containerID Container ID
* @param replica ContainerReplica
+ * @return True of dataNode is removed successfully else false.
*/
void removeContainerReplica(ContainerID containerID, ContainerReplica replica)
throws ContainerNotFoundException, ContainerReplicaNotFoundException;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 19a2f3c2e62..7fea44671ff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -158,14 +158,6 @@ public ContainerBalancerTask(StorageContainerManager scm,
this.selectedSources = new HashSet<>();
this.selectedTargets = new HashSet<>();
findSourceStrategy = new FindSourceGreedy(nodeManager);
- if (config.getNetworkTopologyEnable()) {
- findTargetStrategy = new FindTargetGreedyByNetworkTopology(
- containerManager, placementPolicyValidateProxy,
- nodeManager, networkTopology);
- } else {
- findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager,
- placementPolicyValidateProxy, nodeManager);
- }
this.iterationsStatistic = new ArrayList<>();
}
@@ -440,7 +432,14 @@ private boolean initializeIteration() {
this.maxDatanodesRatioToInvolvePerIteration =
config.getMaxDatanodesRatioToInvolvePerIteration();
this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration();
-
+ if (config.getNetworkTopologyEnable()) {
+ findTargetStrategy = new FindTargetGreedyByNetworkTopology(
+ containerManager, placementPolicyValidateProxy,
+ nodeManager, networkTopology);
+ } else {
+ findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager,
+ placementPolicyValidateProxy, nodeManager);
+ }
this.excludeNodes = config.getExcludeNodes();
this.includeNodes = config.getIncludeNodes();
// include/exclude nodes from balancing according to configs
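This hunk moves the target-strategy construction out of the constructor and into initializeIteration(), so the choice is re-evaluated each iteration. A minimal sketch of that config-driven selection is below; the interface and the two empty classes are stand-ins, not the real Ozone strategy implementations.

```java
final class TargetStrategySelectionSketch {
  interface FindTargetStrategy { }
  static final class FindTargetGreedyByNetworkTopology implements FindTargetStrategy { }
  static final class FindTargetGreedyByUsageInfo implements FindTargetStrategy { }

  // Re-evaluating this at the start of each iteration lets a changed
  // network-topology setting take effect without recreating the balancer task.
  static FindTargetStrategy select(boolean networkTopologyEnabled) {
    return networkTopologyEnabled
        ? new FindTargetGreedyByNetworkTopology()
        : new FindTargetGreedyByUsageInfo();
  }

  public static void main(String[] args) {
    System.out.println(select(false).getClass().getSimpleName());
  }
}
```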
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 3d113b3d301..094e535dcbd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -42,7 +42,8 @@ public SCMNodeMetric(SCMNodeStat stat) {
* @param capacity in bytes
* @param used in bytes
* @param remaining in bytes
- * @param committed in bytes
+ * @param committed
+ * @paaram committed in bytes
*/
@VisibleForTesting
public SCMNodeMetric(long capacity, long used, long remaining,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
index fcfef7de6e6..0abe8f6ea34 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
@@ -248,6 +248,7 @@ public void setOfflineIndexesOkAfterPending(boolean val) {
/**
* Returns true if a container has under-replication caused by offline
* indexes, but it is corrected by a pending add.
+ * @return
*/
public boolean offlineIndexesOkAfterPending() {
return offlineIndexesOkAfterPending;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
index 4eef0a8a744..d1890bdf802 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
@@ -116,7 +116,7 @@ public List<ContainerReplicaOp> getPendingOps(ContainerID containerID) {
* Store a ContainerReplicaOp to add a replica for the given ContainerID.
* @param containerID ContainerID for which to add a replica
* @param target The target datanode
- * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
+ * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
* @param deadlineEpochMillis The time by which the replica should have been
* added and reported by the datanode, or it will
* be discarded.
@@ -130,7 +130,7 @@ public void scheduleAddReplica(ContainerID containerID,
* Store a ContainerReplicaOp to delete a replica for the given ContainerID.
* @param containerID ContainerID for which to delete a replica
* @param target The target datanode
- * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
+ * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
* @param deadlineEpochMillis The time by which the replica should have been
* deleted and reported by the datanode, or it will
* be discarded.
@@ -145,7 +145,7 @@ public void scheduleDeleteReplica(ContainerID containerID,
* been replicated successfully.
* @param containerID ContainerID for which to complete the replication
* @param target The target Datanode
- * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
+ * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
* @return True if a pending replica was found and removed, false otherwise.
*/
public boolean completeAddReplica(ContainerID containerID,
@@ -167,7 +167,7 @@ public boolean completeAddReplica(ContainerID containerID,
* been deleted successfully.
* @param containerID ContainerID for which to complete the deletion
* @param target The target Datanode
- * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
+ * @param replicaIndex The replica index (zero for Ratis, > 0 for EC)
* @return True if a pending replica was found and removed, false otherwise.
*/
public boolean completeDeleteReplica(ContainerID containerID,
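The schedule/complete methods above amount to deadline-stamped book-keeping of pending replica operations per container. A small sketch of that structure, using simplified stand-in types (long container IDs, String datanodes) rather than the real Ozone classes:

```java
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

final class PendingOpsSketch {
  enum OpType { ADD, DELETE }
  record Op(OpType type, String targetDatanode, int replicaIndex, long deadlineEpochMillis) { }

  private final Map<Long, List<Op>> pendingOps = new ConcurrentHashMap<>();

  void scheduleAddReplica(long containerId, String target, int replicaIndex, long deadline) {
    pendingOps.computeIfAbsent(containerId, k -> Collections.synchronizedList(new ArrayList<>()))
        .add(new Op(OpType.ADD, target, replicaIndex, deadline));
  }

  // Returns true if a matching pending ADD was found and removed.
  boolean completeAddReplica(long containerId, String target, int replicaIndex) {
    List<Op> ops = pendingOps.getOrDefault(containerId, Collections.emptyList());
    return ops.removeIf(op -> op.type() == OpType.ADD
        && op.targetDatanode().equals(target)
        && op.replicaIndex() == replicaIndex);
  }

  // Ops whose deadline has passed are discarded, e.g. by a periodic scrubber.
  void expireOps(long nowEpochMillis) {
    pendingOps.values().forEach(ops -> ops.removeIf(op -> op.deadlineEpochMillis() < nowEpochMillis));
  }

  public static void main(String[] args) {
    PendingOpsSketch ops = new PendingOpsSketch();
    ops.scheduleAddReplica(1L, "dn-1", 0, System.currentTimeMillis() + 30_000);
    System.out.println(ops.completeAddReplica(1L, "dn-1", 0)); // true
  }
}
```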
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
index 4e14798ccdc..fe771fac6a4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
@@ -186,9 +186,9 @@ private void countReplicas() {
* For example, consider a CLOSED container with the following replicas:
* {CLOSED, CLOSING, OPEN, UNHEALTHY}
* In this case, healthy replica count equals 3. Calculation:
- * 1 CLOSED -> 1 matching replica.
- * 1 OPEN, 1 CLOSING -> 2 mismatched replicas.
- * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy.
+ * 1 CLOSED -> 1 matching replica.
+ * 1 OPEN, 1 CLOSING -> 2 mismatched replicas.
+ * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy.
* Total healthy replicas = 3 = 1 matching + 2 mismatched replicas
*/
public int getHealthyReplicaCount() {
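The counting rule above (matching plus mismatched replicas count as healthy, UNHEALTHY never does) can be reproduced with the worked example from the javadoc. The enum below is a simplified stand-in for the real replica states:

```java
import java.util.*;

final class HealthyReplicaCountSketch {
  enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  // Healthy = replicas matching the container state + mismatched-but-not-UNHEALTHY replicas.
  static int healthyReplicaCount(ReplicaState containerState, List<ReplicaState> replicas) {
    int matching = 0;
    int mismatched = 0;
    for (ReplicaState r : replicas) {
      if (r == ReplicaState.UNHEALTHY) {
        continue;                       // unhealthy replicas are never counted
      } else if (r == containerState) {
        matching++;
      } else {
        mismatched++;
      }
    }
    return matching + mismatched;
  }

  public static void main(String[] args) {
    // CLOSED container with {CLOSED, CLOSING, OPEN, UNHEALTHY} replicas -> 3, as in the javadoc.
    System.out.println(healthyReplicaCount(ReplicaState.CLOSED,
        List.of(ReplicaState.CLOSED, ReplicaState.CLOSING, ReplicaState.OPEN, ReplicaState.UNHEALTHY)));
  }
}
```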
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java
index f271b8a863c..a95c0d39945 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java
@@ -49,6 +49,7 @@ public interface HealthCheck {
* returns false. This allows handlers to be chained together, and each will
* be tried in turn until one succeeds.
* @param handler
+ * @return
*/
HealthCheck addNext(HealthCheck handler);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
index 1289a0a21ff..c6f15be5d2c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
@@ -37,10 +37,10 @@
*
* Currently we manage the following attributes for a container.
*
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs
- * 2. TypeMap - ReplicationType -> Set of ContainerIDs
- * 3. OwnerMap - OwnerNames -> Set of ContainerIDs
- * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs
+ * 1. StateMap - LifeCycleState -> Set of ContainerIDs
+ * 2. TypeMap - ReplicationType -> Set of ContainerIDs
+ * 3. OwnerMap - OwnerNames -> Set of ContainerIDs
+ * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs
*
* This means that for a cluster size of 750 PB -- we will have around 150
* Million containers, if we assume 5GB average container size.
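The attribute maps described above are essentially per-attribute indexes from an attribute value to the set of container IDs holding that value. A minimal sketch of one such index (the state map), using long IDs and a small enum as stand-ins for ContainerID and the real LifeCycleState:

```java
import java.util.*;

final class ContainerAttributeSketch {
  enum LifeCycleState { OPEN, CLOSING, CLOSED, DELETED }

  private final Map<LifeCycleState, NavigableSet<Long>> stateMap = new EnumMap<>(LifeCycleState.class);

  void insert(LifeCycleState state, long containerId) {
    stateMap.computeIfAbsent(state, s -> new TreeSet<>()).add(containerId);
  }

  // Moving a container between states updates both sets.
  void update(LifeCycleState from, LifeCycleState to, long containerId) {
    stateMap.getOrDefault(from, new TreeSet<>()).remove(containerId);
    insert(to, containerId);
  }

  Set<Long> getCollection(LifeCycleState state) {
    return Collections.unmodifiableSet(stateMap.getOrDefault(state, new TreeSet<>()));
  }

  public static void main(String[] args) {
    ContainerAttributeSketch attr = new ContainerAttributeSketch();
    attr.insert(LifeCycleState.OPEN, 1L);
    attr.update(LifeCycleState.OPEN, LifeCycleState.CLOSING, 1L);
    System.out.println(attr.getCollection(LifeCycleState.CLOSING)); // [1]
  }
}
```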
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
index 5eeb489f677..f0d78b23079 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
@@ -58,7 +58,7 @@
/**
* Utilities for SCM HA security.
*/
-public final class HASecurityUtils {
+public final class HASecurityUtils {
private HASecurityUtils() {
}
@@ -150,6 +150,7 @@ public static CertificateServer initializeRootCertificateServer(
*
* @param conf
* @param certificateClient
+ * @return
*/
public static GrpcTlsConfig createSCMRatisTLSConfig(SecurityConfig conf,
CertificateClient certificateClient) throws IOException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
index 92a5140ff2a..03f6ae293b2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -55,6 +55,7 @@ public interface SCMHAManager extends AutoCloseable {
/**
* Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its
* valid.
+ * @return
*/
SCMHADBTransactionBuffer asSCMHADBTransactionBuffer();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
index b3350d8a12a..05ed833edbe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@ -346,7 +346,7 @@ public void setCommandCounts(CommandQueueReportProto cmds,
* Retrieve the number of queued commands of the given type, as reported by
* the datanode at the last heartbeat.
* @param cmd The command for which to receive the queued command count
- * @return -1 if we have no information about the count, or an integer >= 0
+ * @return -1 if we have no information about the count, or an integer >= 0
* indicating the command count at the last heartbeat.
*/
public int getCommandCount(SCMCommandProto.Type cmd) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
index 1cafab3f67c..4f7df496906 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
@@ -32,7 +32,6 @@ public class DatanodeUsageInfo {
private DatanodeDetails datanodeDetails;
private SCMNodeStat scmNodeStat;
private int containerCount;
- private int pipelineCount;
/**
* Constructs a DatanodeUsageInfo with DatanodeDetails and SCMNodeStat.
@@ -46,7 +45,6 @@ public DatanodeUsageInfo(
this.datanodeDetails = datanodeDetails;
this.scmNodeStat = scmNodeStat;
this.containerCount = -1;
- this.pipelineCount = -1;
}
/**
@@ -147,14 +145,6 @@ public void setContainerCount(int containerCount) {
this.containerCount = containerCount;
}
- public int getPipelineCount() {
- return pipelineCount;
- }
-
- public void setPipelineCount(int pipelineCount) {
- this.pipelineCount = pipelineCount;
- }
-
/**
* Gets Comparator that compares two DatanodeUsageInfo on the basis of
* their utilization values. Utilization is (capacity - remaining) divided
@@ -220,7 +210,6 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) {
}
builder.setContainerCount(containerCount);
- builder.setPipelineCount(pipelineCount);
return builder;
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 992dc82582b..25be60945a9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -97,6 +97,8 @@ default RegisteredCommand register(
* @param type The type of the SCMCommand.
* @param scmCommand A BiConsumer that takes a DatanodeDetails and a
* SCMCommand object and performs the necessary actions.
+ * @return whatever the regular register command returns with default
+ * layout version passed in.
*/
default void registerSendCommandNotify(SCMCommandProto.Type type,
BiConsumer<DatanodeDetails, SCMCommand<?>> scmCommand) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
index 1bd9677a363..a66fc0d22fb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
@@ -45,7 +45,7 @@ public interface NodeManagerMXBean {
/**
* @return Get the NodeStatus table information like hostname,
- * Commissioned State and Operational State column for dataNode
+ * Commissioned State & Operational State column for dataNode
*/
Map<String, Map<String, String>> getNodeStatusInfo();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 78c1801a103..3c3ff8fb833 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -791,7 +791,7 @@ public void run() {
*
* This method is synchronized to coordinate node state updates between
* the upgrade finalization thread which calls this method, and the
- * node health processing thread that calls {@link #checkNodesHealth}.
+ * node health processing thread that calls {@link this#checkNodesHealth}.
*/
public synchronized void forceNodesToHealthyReadOnly() {
try {
@@ -817,7 +817,7 @@ public synchronized void forceNodesToHealthyReadOnly() {
/**
* This method is synchronized to coordinate node state updates between
* the upgrade finalization thread which calls
- * {@link #forceNodesToHealthyReadOnly}, and the node health processing
+ * {@link this#forceNodesToHealthyReadOnly}, and the node health processing
* thread that calls this method.
*/
@VisibleForTesting
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 7db0c88e173..05a68628852 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -146,8 +146,6 @@ public class SCMNodeManager implements NodeManager {
private static final String LASTHEARTBEAT = "LASTHEARTBEAT";
private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT";
private static final String TOTALCAPACITY = "CAPACITY";
- private static final String DNUUID = "UUID";
- private static final String VERSION = "VERSION";
/**
* Constructs SCM machine Manager.
*/
@@ -449,11 +447,6 @@ public RegisteredCommand register(
processNodeReport(datanodeDetails, nodeReport);
LOG.info("Updated datanode to: {}", dn);
scmNodeEventPublisher.fireEvent(SCMEvents.NODE_ADDRESS_UPDATE, dn);
- } else if (isVersionChange(oldNode.getVersion(), datanodeDetails.getVersion())) {
- LOG.info("Update the version for registered datanode = {}, " +
- "oldVersion = {}, newVersion = {}.",
- datanodeDetails.getUuid(), oldNode.getVersion(), datanodeDetails.getVersion());
- nodeStateManager.updateNode(datanodeDetails, layoutInfo);
}
} catch (NodeNotFoundException e) {
LOG.error("Cannot find datanode {} from nodeStateManager",
@@ -515,18 +508,6 @@ private boolean updateDnsToUuidMap(
return ipChanged || hostNameChanged;
}
- /**
- * Check if the version has been updated.
- *
- * @param oldVersion datanode oldVersion
- * @param newVersion datanode newVersion
- * @return true means replacement is needed, while false means replacement is not needed.
- */
- private boolean isVersionChange(String oldVersion, String newVersion) {
- final boolean versionChanged = !Objects.equals(oldVersion, newVersion);
- return versionChanged;
- }
-
/**
* Send heartbeat to indicate the datanode is alive and doing well.
*
@@ -1001,7 +982,6 @@ public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) {
DatanodeUsageInfo usageInfo = new DatanodeUsageInfo(dn, stat);
try {
usageInfo.setContainerCount(getContainerCount(dn));
- usageInfo.setPipelineCount(getPipeLineCount(dn));
} catch (NodeNotFoundException ex) {
LOG.error("Unknown datanode {}.", dn, ex);
}
@@ -1155,8 +1135,6 @@ public Map<String, Map<String, String>> getNodeStatusInfo() {
String nonScmUsedPerc = storagePercentage[1];
map.put(USEDSPACEPERCENT,
"Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%");
- map.put(DNUUID, dni.getUuidString());
- map.put(VERSION, dni.getVersion());
nodes.put(hostName, map);
}
return nodes;
@@ -1166,6 +1144,7 @@ public Map<String, Map<String, String>> getNodeStatusInfo() {
* Calculate the storage capacity of the DataNode node.
* @param storageReports Calculate the storage capacity corresponding
* to the storage collection.
+ * @return
*/
public static String calculateStorageCapacity(
List<StorageReportProto> storageReports) {
@@ -1213,6 +1192,7 @@ private static String convertUnit(double value) {
* Calculate the storage usage percentage of a DataNode node.
* @param storageReports Calculate the storage percentage corresponding
* to the storage collection.
+ * @return
*/
public static String[] calculateStoragePercentage(
List<StorageReportProto> storageReports) {
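calculateStorageCapacity and calculateStoragePercentage above aggregate per-volume reports into a total capacity and Ozone/other usage percentages. A rough sketch of that aggregation, with a simple record standing in for StorageReportProto (the field names and output format are assumptions for illustration):

```java
import java.util.List;

final class StorageSummarySketch {
  record VolumeReport(long capacity, long scmUsed, long remaining) { }

  static String summarize(List<VolumeReport> reports) {
    long capacity = 0, scmUsed = 0, remaining = 0;
    for (VolumeReport r : reports) {
      capacity += r.capacity();
      scmUsed += r.scmUsed();
      remaining += r.remaining();
    }
    long nonScmUsed = capacity - scmUsed - remaining;  // space used by non-Ozone data
    double scmUsedPct = capacity == 0 ? 0 : 100.0 * scmUsed / capacity;
    double nonScmUsedPct = capacity == 0 ? 0 : 100.0 * nonScmUsed / capacity;
    return String.format("capacity=%dB, Ozone: %.2f%%, other: %.2f%%",
        capacity, scmUsedPct, nonScmUsedPct);
  }

  public static void main(String[] args) {
    System.out.println(summarize(List.of(
        new VolumeReport(100_000_000L, 10_000_000L, 70_000_000L))));
  }
}
```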
@@ -1630,11 +1610,6 @@ public int getContainerCount(DatanodeDetails datanodeDetails)
return nodeStateManager.getContainerCount(datanodeDetails.getUuid());
}
- public int getPipeLineCount(DatanodeDetails datanodeDetails)
- throws NodeNotFoundException {
- return nodeStateManager.getPipelinesCount(datanodeDetails);
- }
-
@Override
public void addDatanodeCommand(UUID dnId, SCMCommand command) {
writeLock().lock();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index d6058877126..4dd0443a505 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -46,6 +46,7 @@ void addPipeline(HddsProtos.Pipeline pipelineProto)
/**
* Removing pipeline would be replicated to Ratis.
* @param pipelineIDProto
+ * @return Pipeline removed
* @throws IOException
*/
@Replicate
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
index fc7249462c4..d38a904d09c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java
@@ -118,8 +118,6 @@ public class RootCARotationManager extends StatefulService {
*
* @param scm the storage container manager
*
- *
- * {@code
* (1) (3)(4)
* --------------------------->
* (2) scm2(Follower)
@@ -132,8 +130,8 @@ public class RootCARotationManager extends StatefulService {
* --------------------------->
* (2) scm3(Follower)
* <---------------------------
- * }
- *
+ *
+ *
* (1) Rotation Prepare
* (2) Rotation Prepare Ack
* (3) Rotation Commit
@@ -188,7 +186,7 @@ public void notifyStatusChanged() {
waitAckTask.cancel(true);
}
if (waitAckTimeoutTask != null) {
- waitAckTimeoutTask.cancel(true);
+ waitAckTask.cancel(true);
}
if (clearPostProcessingTask != null) {
clearPostProcessingTask.cancel(true);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 6f5429a853b..cca2df00374 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -289,12 +289,12 @@ public interface ContainerReport {
public enum ContainerReportType {
/**
* Incremental container report type
- * {@link IncrementalContainerReportFromDatanode}.
+ * {@liks IncrementalContainerReportFromDatanode}.
*/
ICR,
/**
* Full container report type
- * {@link ContainerReportFromDatanode}.
+ * {@liks ContainerReportFromDatanode}.
*/
FCR
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 7c6f0fbbddf..e74a83e394f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -491,6 +491,7 @@ private static String flatten(String input) {
/**
* Get Key associated with Datanode address for this server.
+ * @return
*/
protected String getDatanodeAddressKey() {
return this.scm.getScmNodeDetails().getDatanodeAddressKey();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
index 5aaf4b7b485..2b6fa032b53 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
@@ -70,10 +70,9 @@ public String getKerberosKeytab() {
* This static class is required to support other classes
* that reference the key names and also require attributes.
* Example: SCMSecurityProtocol where the KerberosInfo references
- * the old configuration with the annotation shown below:
- *
- * {@code KerberosInfo(serverPrincipal =
- * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)}
+ * the old configuration with the annotation shown below:-
+ * @KerberosInfo(serverPrincipal =
+ * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
*/
public static class ConfigStrings {
public static final String HDDS_SCM_HTTP_AUTH_CONFIG_PREFIX =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
index 75a5193116c..de609356b22 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.scm.server;
-import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -73,7 +72,7 @@ public interface SCMMXBean extends ServiceRuntimeInfo {
String getClusterId();
- List<List<String>> getScmRatisRoles();
+ String getScmRatisRoles();
/**
* Primordial node is the node on which scm init operation is performed.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index 17318107e3d..88b3c887746 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -430,6 +430,7 @@ public String getCACertificate() throws IOException {
* @param role - node role: OM/SCM/DN.
* @param startSerialId - start certificate serial id.
* @param count - max number of certificates returned in a batch.
+ * @return
* @throws IOException
*/
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 8f7a7c2f9f1..876c499113d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -172,7 +172,6 @@
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.server.RaftServer;
import org.apache.ratis.util.ExitUtils;
import org.apache.ratis.util.JvmPauseMonitor;
import org.slf4j.Logger;
@@ -614,8 +613,7 @@ public OzoneConfiguration getConfiguration() {
* @param conf HDDS configuration
* @param configurator SCM configurator
* @return SCM instance
- * @throws IOException on Failure,
- * @throws AuthenticationException
+ * @throws IOException, AuthenticationException
*/
public static StorageContainerManager createSCM(
OzoneConfiguration conf, SCMConfigurator configurator)
@@ -628,8 +626,7 @@ public static StorageContainerManager createSCM(
*
* @param conf HDDS configuration
* @return SCM instance
- * @throws IOException on Failure,
- * @throws AuthenticationException
+ * @throws IOException, AuthenticationException
*/
public static StorageContainerManager createSCM(OzoneConfiguration conf)
throws IOException, AuthenticationException {
@@ -1621,7 +1618,8 @@ private void persistSCMCertificates() throws IOException {
if (primaryScmNodeId != null && !primaryScmNodeId.equals(
scmStorageConfig.getScmId())) {
List<String> pemEncodedCerts =
- getScmSecurityClientWithMaxRetry(configuration, getCurrentUser()).listCACertificate();
+ scmCertificateClient.listCA();
+
// Write the primary SCM CA and Root CA during startup.
for (String cert : pemEncodedCerts) {
X509Certificate x509Certificate = CertificateCodec.getX509Certificate(
@@ -2139,54 +2137,10 @@ public ContainerTokenGenerator getContainerTokenGenerator() {
}
@Override
- public List<List<String>> getScmRatisRoles() {
+ public String getScmRatisRoles() {
final SCMRatisServer server = getScmHAManager().getRatisServer();
-
- // If Ratis is disabled
- if (server == null) {
- return getRatisRolesException("Ratis is disabled");
- }
-
- // To attempt to find the SCM Leader,
- // and if the Leader is not found
- // return Leader is not found message.
- RaftServer.Division division = server.getDivision();
- RaftPeerId leaderId = division.getInfo().getLeaderId();
- if (leaderId == null) {
- return getRatisRolesException("No leader found");
- }
-
- // If the SCMRatisServer is stopped, return a service stopped message.
- if (server.isStopped()) {
- return getRatisRolesException("Server is shutting down");
- }
-
- // Attempt to retrieve role information.
- try {
- List<String> ratisRoles = server.getRatisRoles();
- List<List<String>> result = new ArrayList<>();
- for (String role : ratisRoles) {
- String[] roleArr = role.split(":");
- List<String> scmInfo = new ArrayList<>();
- // Host Name
- scmInfo.add(roleArr[0]);
- // Node ID
- scmInfo.add(roleArr[3]);
- // Ratis Port
- scmInfo.add(roleArr[1]);
- // Role
- scmInfo.add(roleArr[2]);
- result.add(scmInfo);
- }
- return result;
- } catch (Exception e) {
- LOG.error("Failed to getRatisRoles.", e);
- return getRatisRolesException("Exception Occurred, " + e.getMessage());
- }
- }
-
- private static List<List<String>> getRatisRolesException(String exceptionString) {
- return Collections.singletonList(Collections.singletonList(exceptionString));
+ return server != null ?
+ HddsUtils.format(server.getRatisRoles()) : "STANDALONE";
}
/**
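The removed table-style getScmRatisRoles() splits each Ratis role string and reorders its pieces into host name, node ID, Ratis port, and role. A small sketch of that parsing is below; the "host:port:ROLE:nodeId" layout is inferred from the index comments in the removed code, not from a documented format, so treat it as an assumption.

```java
import java.util.*;

final class RatisRolesParsingSketch {
  static List<List<String>> toTable(List<String> ratisRoles) {
    List<List<String>> result = new ArrayList<>();
    for (String role : ratisRoles) {
      String[] parts = role.split(":");
      // Reorder into: host name, node id, ratis port, role.
      result.add(List.of(parts[0], parts[3], parts[1], parts[2]));
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(toTable(List.of("scm1.example.com:9894:LEADER:scm-node-1")));
  }
}
```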
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index 2748716e67f..3f825d4e25f 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -110,114 +110,6 @@ Space Statistics
-Pipeline Statistics
-
-
-
- Pipeline State |
- Size |
-
-
- Closed |
- {{statistics.pipelines.closed}} |
-
-
- Allocated |
- {{statistics.pipelines.allocated}} |
-
-
- Open |
- {{statistics.pipelines.open}} |
-
-
- Dormant |
- {{statistics.pipelines.dormant}} |
-
-
-
-
-Container Statistics
-
-
-
- Operational State |
- Size |
-
-
- Open |
- {{statistics.containers.lifecycle.open}} |
-
-
- Closing |
- {{statistics.containers.lifecycle.closing}} |
-
-
- Quasi Closed |
- {{statistics.containers.lifecycle.quasi_closed}} |
-
-
- Closed |
- {{statistics.containers.lifecycle.closed}} |
-
-
- Deleting |
- {{statistics.containers.lifecycle.deleting}} |
-
-
- Deleted |
- {{statistics.containers.lifecycle.deleted}} |
-
-
- Recovering |
- {{statistics.containers.lifecycle.recovering}} |
-
-
-
-
-
-
- Health |
- Size |
-
-
- Under Replicated |
- {{statistics.containers.health.under_replicated}} |
-
-
- Mis Replicated |
- {{statistics.containers.health.mis_replicated}} |
-
-
- Over Replicated |
- {{statistics.containers.health.over_replicated}} |
-
-
- Missing |
- {{statistics.containers.health.missing}} |
-
-
- Unhealthy |
- {{statistics.containers.health.unhealthy}} |
-
-
- Empty |
- {{statistics.containers.health.empty}} |
-
-
- Open Unhealthy |
- {{statistics.containers.health.open_unhealthy}} |
-
-
- Quasi Closed Stuck |
- {{statistics.containers.health.quasi_closed_stuck}} |
-
-
- Open Without Pipeline |
- {{statistics.containers.health.open_without_pipeline}} |
-
-
-
-
Node Status
@@ -248,10 +140,6 @@
Node Status
'sortdesc':(columnName == 'comstate' && !reverse)}">Commisioned State
Last Heartbeat |
-
UUID |
-
Version |
@@ -269,8 +157,6 @@ Node Status
{{typestat.opstate}} |
{{typestat.comstate}} |
{{typestat.lastheartbeat}} |
- {{typestat.uuid}} |
- {{typestat.version}} |
@@ -324,6 +210,10 @@
Status
Force Exit Safe Mode |
{{$ctrl.overview.jmx.SafeModeExitForceful}} |
+
+ SCM Roles (HA) |
+ {{$ctrl.overview.jmx.ScmRatisRoles}} |
+
Primordial Node (HA) |
{{$ctrl.overview.jmx.PrimordialNode}} |
@@ -345,35 +235,6 @@ Meta-Data Volume Information
-SCM Roles (HA)
-{{$ctrl.overview.jmx.ScmRatisRoles[0][0]}}
-
-
-
-
- Host Name |
- Node ID |
- Ratis Port |
- Role |
-
-
-
-
- {{roles[0]}} |
- {{roles[1]}} |
- {{roles[2]}} |
- {{roles[3]}} |
-
-
- {{roles[0]}} |
- {{roles[1]}} |
- {{roles[2]}} |
- {{roles[3]}} |
-
-
-
-
-
Safemode rules statuses
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
index fc216c06862..6fac6849530 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
@@ -53,42 +53,9 @@
remaining : "N/A",
nonscmused : "N/A"
}
- },
- pipelines : {
- closed : "N/A",
- allocated : "N/A",
- open : "N/A",
- dormant : "N/A"
- },
- containers : {
- lifecycle : {
- open : "N/A",
- closing : "N/A",
- quasi_closed : "N/A",
- closed : "N/A",
- deleting : "N/A",
- deleted : "N/A",
- recovering : "N/A"
- },
- health : {
- under_replicated : "N/A",
- mis_replicated : "N/A",
- over_replicated : "N/A",
- missing : "N/A",
- unhealthy : "N/A",
- empty : "N/A",
- open_unhealthy : "N/A",
- quasi_closed_stuck : "N/A",
- open_without_pipeline : "N/A"
- }
}
}
- $http.get("jmx?qry=Ratis:service=RaftServer,group=*,id=*")
- .then(function (result) {
- ctrl.role = result.data.beans[0];
- });
-
function get_protocol(URLScheme, value, baseProto, fallbackProto) {
let protocol = "unknown"
let port = -1;
@@ -128,8 +95,6 @@
capacity: value && value.find((element) => element.key === "CAPACITY").value,
comstate: value && value.find((element) => element.key === "COMSTATE").value,
lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value,
- uuid: value && value.find((element) => element.key === "UUID").value,
- version: value && value.find((element) => element.key === "VERSION").value,
port: portSpec.port,
protocol: portSpec.proto
}
@@ -170,46 +135,6 @@
}
});
});
-
- $http.get("jmx?qry=Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo")
- .then(function (result) {
- const URLScheme = location.protocol.replace(":" , "");
- ctrl.scmpipelinemanager = result.data.beans[0];
- ctrl.scmpipelinemanager.PipelineInfo.forEach(({key, value}) => {
- if(key == "CLOSED") {
- $scope.statistics.pipelines.closed = value;
- } else if(key == "ALLOCATED") {
- $scope.statistics.pipelines.allocated = value;
- } else if(key == "OPEN") {
- $scope.statistics.pipelines.open = value;
- } else if(key == "DORMANT") {
- $scope.statistics.pipelines.dormant = value;
- }
- });
- });
-
- $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics")
- .then(function (result) {
- const URLScheme = location.protocol.replace(":" , "");
- ctrl.scmcontainermanager = result.data.beans[0];
- $scope.statistics.containers.lifecycle.open = ctrl.scmcontainermanager.OpenContainers;
- $scope.statistics.containers.lifecycle.closing = ctrl.scmcontainermanager.ClosingContainers;
- $scope.statistics.containers.lifecycle.quasi_closed = ctrl.scmcontainermanager.QuasiClosedContainers;
- $scope.statistics.containers.lifecycle.closed = ctrl.scmcontainermanager.ClosedContainers;
- $scope.statistics.containers.lifecycle.deleting = ctrl.scmcontainermanager.DeletingContainers;
- $scope.statistics.containers.lifecycle.deleted = ctrl.scmcontainermanager.DeletedContainers;
- $scope.statistics.containers.lifecycle.recovering = ctrl.scmcontainermanager.RecoveringContainers;
- $scope.statistics.containers.health.under_replicated = ctrl.scmcontainermanager.UnderReplicatedContainers;
- $scope.statistics.containers.health.mis_replicated = ctrl.scmcontainermanager.MisReplicatedContainers;
- $scope.statistics.containers.health.over_replicated = ctrl.scmcontainermanager.OverReplicatedContainers;
- $scope.statistics.containers.health.missing = ctrl.scmcontainermanager.MissingContainers;
- $scope.statistics.containers.health.unhealthy = ctrl.scmcontainermanager.UnhealthyContainers;
- $scope.statistics.containers.health.empty = ctrl.scmcontainermanager.EmptyContainers;
- $scope.statistics.containers.health.open_unhealthy = ctrl.scmcontainermanager.OpenUnhealthyContainers;
- $scope.statistics.containers.health.quasi_closed_stuck = ctrl.scmcontainermanager.StuckQuasiClosedContainers;
- $scope.statistics.containers.health.open_without_pipeline = ctrl.scmcontainermanager.OpenContainersWithoutPipeline;
- });
-
/*if option is 'All' display all records else display specified record on page*/
$scope.UpdateRecordsToShow = () => {
if($scope.RecordsToDisplay == 'All') {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java
index 0972e57df64..a3ec55d5863 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java
@@ -86,7 +86,7 @@ public MockedSCM(@Nonnull TestableCluster testableCluster) {
}
}
- void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) {
+ private void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) {
ozoneCfg.setFromObject(balancerConfig);
try {
doMock(balancerConfig, ozoneCfg);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java
index 48b3ee2d0de..b8ac648e844 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java
@@ -19,9 +19,7 @@
package org.apache.hadoop.hdds.scm.container.balancer;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.List;
@@ -57,30 +55,4 @@ void testGetIterationStatistics() {
});
}
-
- /**
- * @see HDDS-11350
- */
- @Test
- void testGetCurrentIterationsStatisticDoesNotThrowNullPointerExceptionWhenBalancingThreadIsSleeping() {
- MockedSCM mockedScm = new MockedSCM(new TestableCluster(10, OzoneConsts.GB));
- OzoneConfiguration ozoneConfig = new OzoneConfiguration();
- ContainerBalancerConfiguration config = ozoneConfig.getObject(ContainerBalancerConfiguration.class);
-
- config.setIterations(2);
- // the following config makes the balancing thread go to sleep while waiting for DU to be triggered in DNs and
- // updated storage reports to arrive via DN heartbeats - of course, this is a unit test and NodeManager, DNs etc.
- // are all mocked
- config.setTriggerDuEnable(true);
- mockedScm.init(config, ozoneConfig);
-
- // run ContainerBalancerTask in a new thread and have the current thread call getCurrentIterationsStatistic
- StorageContainerManager scm = mockedScm.getStorageContainerManager();
- ContainerBalancer cb = new ContainerBalancer(scm);
- ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, cb, cb.getMetrics(), config, false);
- Thread thread = new Thread(task);
- thread.setDaemon(true);
- thread.start();
- Assertions.assertDoesNotThrow(task::getCurrentIterationsStatistic);
- }
}
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index 9a3a5c7a8f1..c9fa668445d 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -335,11 +335,11 @@ private static long monotonicNow() {
*
*
* TODO: Add lambda support once Java 8 is common.
- * {@code
+ *
* SystemErrCapturer.withCapture(capture -> {
* ...
* })
- * }
+ *
*/
public static class SystemErrCapturer implements AutoCloseable {
private final ByteArrayOutputStream bytes;
@@ -376,11 +376,11 @@ public void close() throws Exception {
*
*
* TODO: Add lambda support once Java 8 is common.
- * {@code
+ *
* SystemOutCapturer.withCapture(capture -> {
* ...
* })
- * }
+ *
*/
public static class SystemOutCapturer implements AutoCloseable {
private final ByteArrayOutputStream bytes;
@@ -475,8 +475,8 @@ public static final class ReflectionUtils {
* This method provides the modifiers field using reflection approach which is compatible
* for both pre Java 9 and post java 9 versions.
* @return modifiers field
- * @throws IllegalAccessException illegalAccessException,
- * @throws NoSuchFieldException noSuchFieldException.
+ * @throws IllegalAccessException
+ * @throws NoSuchFieldException
*/
public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException {
Field modifiersField = null;
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
index d6b028c815f..661989dade1 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -77,13 +77,11 @@ public interface TimeoutHandler {
* is called. This returns the exception passed in (if any),
* or generates a new one.
*
- * {@code
* await(
* 30 * 1000,
* () -> { return 0 == filesystem.listFiles(new Path("/")).length); },
* () -> 500),
* (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
- * }
*
*
* @param timeoutMillis timeout in milliseconds.
@@ -162,11 +160,9 @@ public static int await(int timeoutMillis,
*
* Example: await for probe to succeed:
*
- * {@code
* await(
* 30 * 1000, 500,
* () -> { return 0 == filesystem.listFiles(new Path("/")).length); });
- * }
*
*
* @param timeoutMillis timeout in milliseconds.
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index c3e379a5399..98d8bb0d83e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -116,7 +116,7 @@ private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf)
throws IOException {
XceiverClientManager manager;
if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
- CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(conf);
+ CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, conf);
manager = new XceiverClientManager(conf,
conf.getObject(XceiverClientManager.ScmClientConfig.class),
new ClientTrustManager(caCerts, null));
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index 2c069291a86..b967fa0658c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -155,8 +155,6 @@ private void printInfo(DatanodeUsage info) {
+ " B", StringUtils.byteDesc(info.getRemaining()));
System.out.printf("%-13s: %s %n", "Remaining %",
PERCENT_FORMAT.format(info.getRemainingRatio()));
- System.out.printf("%-13s: %d %n", "Pipeline(s)",
- info.getPipelineCount());
System.out.printf("%-13s: %d %n", "Container(s)",
info.getContainerCount());
System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated",
@@ -194,7 +192,6 @@ private static class DatanodeUsage {
private long committed = 0;
private long freeSpaceToSpare = 0;
private long containerCount = 0;
- private long pipelineCount = 0;
DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) {
if (proto.hasNode()) {
@@ -215,9 +212,6 @@ private static class DatanodeUsage {
if (proto.hasContainerCount()) {
containerCount = proto.getContainerCount();
}
- if (proto.hasPipelineCount()) {
- pipelineCount = proto.getPipelineCount();
- }
if (proto.hasFreeSpaceToSpare()) {
freeSpaceToSpare = proto.getFreeSpaceToSpare();
}
@@ -283,8 +277,5 @@ public double getRemainingRatio() {
return remaining / (double) capacity;
}
- public long getPipelineCount() {
- return pipelineCount;
- }
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
index e5392ef618d..7c70456995b 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
@@ -59,22 +59,12 @@ public void execute(ScmClient scmClient) throws IOException {
List<Pipeline> pipelineList = new ArrayList<>();
Predicate<? super Pipeline> predicate = replicationFilter.orElse(null);
- List<Pipeline> pipelines = scmClient.listPipelines();
- if (predicate == null) {
- for (Pipeline pipeline : pipelines) {
- if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED) {
- pipelineList.add(pipeline);
- }
- }
- } else {
- for (Pipeline pipeline : pipelines) {
- boolean filterPassed = predicate.test(pipeline);
- if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) {
- pipelineList.add(pipeline);
- }
+ for (Pipeline pipeline : scmClient.listPipelines()) {
+ boolean filterPassed = (predicate != null) && predicate.test(pipeline);
+ if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) {
+ pipelineList.add(pipeline);
}
}
-
System.out.println("Sending close command for " + pipelineList.size() + " pipelines...");
pipelineList.forEach(pipeline -> {
try {
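
A minimal sketch of the selection rule the rewritten loop above is intended to implement, shown as a standalone helper for readability: a null predicate (no replication filter supplied) selects every non-CLOSED pipeline, otherwise only non-CLOSED pipelines matching the filter are selected. The helper name is hypothetical; the types come from the surrounding code.

  // Illustrative helper only; mirrors the intended filtering behaviour above.
  static List<Pipeline> selectPipelinesToClose(List<Pipeline> pipelines,
      Predicate<? super Pipeline> predicate) {
    List<Pipeline> toClose = new ArrayList<>();
    for (Pipeline pipeline : pipelines) {
      // No filter means every pipeline passes; otherwise apply the predicate.
      boolean filterPassed = (predicate == null) || predicate.test(pipeline);
      if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) {
        toClose.add(pipeline);
      }
    }
    return toClose;
  }
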
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index a691e754606..09f6621735e 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -94,7 +94,6 @@ public void testCorrectJsonValuesInReport() throws IOException {
assertEquals(80.00, json.get(0).get("remainingPercent").doubleValue(), 0.001);
assertEquals(5, json.get(0).get("containerCount").longValue());
- assertEquals(10, json.get(0).get("pipelineCount").longValue());
}
@Test
@@ -123,7 +122,6 @@ public void testOutputDataFieldsAligning() throws IOException {
assertThat(output).contains("Remaining :");
assertThat(output).contains("Remaining % :");
assertThat(output).contains("Container(s) :");
- assertThat(output).contains("Pipeline(s) :");
assertThat(output).contains("Container Pre-allocated :");
assertThat(output).contains("Remaining Allocatable :");
assertThat(output).contains("Free Space To Spare :");
@@ -137,7 +135,6 @@ private List getUsageProto() {
.setRemaining(80)
.setUsed(10)
.setContainerCount(5)
- .setPipelineCount(10)
.build());
return result;
}
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java
deleted file mode 100644
index 013350fe871..00000000000
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.pipeline;
-
-import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.Arguments;
-import org.junit.jupiter.params.provider.MethodSource;
-import picocli.CommandLine;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Stream;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.params.provider.Arguments.arguments;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for the ClosePipelineSubcommand class.
- */
-class TestClosePipelinesSubCommand {
-
- private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name();
- private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
- private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
- private final PrintStream originalOut = System.out;
- private final PrintStream originalErr = System.err;
- private ClosePipelineSubcommand cmd;
- private ScmClient scmClient;
-
- public static Stream<Arguments> values() {
- return Stream.of(
- arguments(
- new String[]{"--all"},
- "Sending close command for 2 pipelines...\n",
- "with empty parameters"
- ),
- arguments(
- new String[]{"--all", "-ffc", "THREE"},
- "Sending close command for 1 pipelines...\n",
- "by filter factor, opened"
- ),
- arguments(
- new String[]{"--all", "-ffc", "ONE"},
- "Sending close command for 0 pipelines...\n",
- "by filter factor, closed"
- ),
- arguments(
- new String[]{"--all", "-r", "rs-3-2-1024k", "-t", "EC"},
- "Sending close command for 1 pipelines...\n",
- "by replication and type, opened"
- ),
- arguments(
- new String[]{"--all", "-r", "rs-6-3-1024k", "-t", "EC"},
- "Sending close command for 0 pipelines...\n",
- "by replication and type, closed"
- ),
- arguments(
- new String[]{"--all", "-t", "EC"},
- "Sending close command for 1 pipelines...\n",
- "by type, opened"
- ),
- arguments(
- new String[]{"--all", "-t", "RS"},
- "Sending close command for 0 pipelines...\n",
- "by type, closed"
- )
- );
- }
-
- @BeforeEach
- public void setup() throws IOException {
- cmd = new ClosePipelineSubcommand();
- System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
- System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
-
- scmClient = mock(ScmClient.class);
- when(scmClient.listPipelines()).thenAnswer(invocation -> createPipelines());
- }
-
- @AfterEach
- public void tearDown() {
- System.setOut(originalOut);
- System.setErr(originalErr);
- }
-
- @ParameterizedTest(name = "{index}. {2}")
- @MethodSource("values")
- void testCloseAllPipelines(String[] commands, String expectedOutput, String testName) throws IOException {
- CommandLine c = new CommandLine(cmd);
- c.parseArgs(commands);
- cmd.execute(scmClient);
- assertEquals(expectedOutput, outContent.toString(DEFAULT_ENCODING));
- }
-
- private List<Pipeline> createPipelines() {
- List<Pipeline> pipelines = new ArrayList<>();
- pipelines.add(createPipeline(StandaloneReplicationConfig.getInstance(ONE),
- Pipeline.PipelineState.CLOSED));
- pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE),
- Pipeline.PipelineState.OPEN));
- pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE),
- Pipeline.PipelineState.CLOSED));
-
- pipelines.add(createPipeline(
- new ECReplicationConfig(3, 2), Pipeline.PipelineState.OPEN));
- pipelines.add(createPipeline(
- new ECReplicationConfig(3, 2), Pipeline.PipelineState.CLOSED));
- pipelines.add(createPipeline(
- new ECReplicationConfig(6, 3), Pipeline.PipelineState.CLOSED));
- pipelines.add(createPipeline(
- RatisReplicationConfig.getInstance(THREE), Pipeline.PipelineState.CLOSED));
- return pipelines;
- }
-
- private Pipeline createPipeline(ReplicationConfig repConfig,
- Pipeline.PipelineState state) {
- return new Pipeline.Builder()
- .setId(PipelineID.randomId())
- .setCreateTimestamp(System.currentTimeMillis())
- .setState(state)
- .setReplicationConfig(repConfig)
- .setNodes(createDatanodeDetails(1))
- .build();
- }
-
- private List<DatanodeDetails> createDatanodeDetails(int count) {
- List<DatanodeDetails> dns = new ArrayList<>();
- for (int i = 0; i < count; i++) {
- HddsProtos.DatanodeDetailsProto dnd =
- HddsProtos.DatanodeDetailsProto.newBuilder()
- .setHostName("host" + i)
- .setIpAddress("1.2.3." + i + 1)
- .setNetworkLocation("/default")
- .setNetworkName("host" + i)
- .addPorts(HddsProtos.Port.newBuilder()
- .setName("ratis").setValue(5678).build())
- .setUuid(UUID.randomUUID().toString())
- .build();
- dns.add(DatanodeDetails.getFromProtoBuf(dnd));
- }
- return dns;
- }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 56ca8798f22..65dce09cba1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -217,7 +217,7 @@ public S3SecretValue getS3Secret(String kerberosID, boolean createIfNotExist)
* Set secretKey for accessId.
* @param accessId
* @param secretKey
- * @return {@code S3SecretValue } pair
+ * @return S3SecretValue pair
* @throws IOException
*/
public S3SecretValue setS3Secret(String accessId, String secretKey)
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 80a495a1d12..44239aafceb 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -170,7 +170,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol,
* Create OzoneClient for token renew/cancel operations.
* @param conf Configuration to be used for OzoneCient creation
* @param token ozone token is involved
- * @return OzoneClient
+ * @return
* @throws IOException
*/
public static OzoneClient getOzoneClient(Configuration conf,
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java
index dc85fffe1ca..36031a9cf4d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java
@@ -32,7 +32,7 @@ public class KeyOutputStreamSemaphore {
private final Semaphore requestSemaphore;
KeyOutputStreamSemaphore(int maxConcurrentWritePerKey) {
- LOG.debug("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey);
+ LOG.info("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey);
if (maxConcurrentWritePerKey > 0) {
requestSemaphore = new Semaphore(maxConcurrentWritePerKey);
} else if (maxConcurrentWritePerKey == 0) {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 8d9614b554a..16211ebbb8e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -59,6 +59,7 @@
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.helpers.TenantStateList;
@@ -513,6 +514,39 @@ List listKeys(String volumeName, String bucketName,
String keyPrefix, String prevKey, int maxListResult)
throws IOException;
+ /**
+ * List trash allows the user to list the keys that were marked as deleted,
+ * but not actually deleted by Ozone Manager. This allows a user to recover
+ * keys within a configurable window.
+ * @param volumeName - The volume name, which can also be a wild card
+ * using '*'.
+ * @param bucketName - The bucket name, which can also be a wild card
+ * using '*'.
+ * @param startKeyName - List keys from a specific key name.
+ * @param keyPrefix - List keys using a specific prefix.
+ * @param maxKeys - The number of keys to be returned. This must be below
+ * the cluster-level limit set by admins.
+ * @return The list of keys that are deleted from the deleted table.
+ * @throws IOException
+ */
+ List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
+ String startKeyName, String keyPrefix,
+ int maxKeys)
+ throws IOException;
+
+ /**
+ * Recover trash allows the user to recover keys that were marked as deleted,
+ * but not actually deleted by Ozone Manager.
+ * @param volumeName - The volume name.
+ * @param bucketName - The bucket name.
+ * @param keyName - The key the user wants to recover.
+ * @param destinationBucket - The bucket the user wants to recover to.
+ * @return True if the recover operation succeeded, false otherwise.
+ * @throws IOException
+ */
+ boolean recoverTrash(String volumeName, String bucketName, String keyName,
+ String destinationBucket) throws IOException;
+
/**
* Get OzoneKey.
* @param volumeName Name of the Volume
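
For orientation, a minimal usage sketch of the two methods declared above. The clientProtocol reference and the volume/bucket/key names are hypothetical placeholders, and error handling is omitted.

// Hypothetical caller of the listTrash/recoverTrash declarations above.
List<RepeatedOmKeyInfo> deleted =
    clientProtocol.listTrash("vol1", "bucket1", "", "", 100);
for (RepeatedOmKeyInfo versions : deleted) {
  for (OmKeyInfo keyInfo : versions.getOmKeyInfoList()) {
    System.out.println("deleted key: " + keyInfo.getKeyName());
  }
}
boolean recovered =
    clientProtocol.recoverTrash("vol1", "bucket1", "key1", "bucket1");
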
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index fe986640176..bfeb9c1e6c1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -34,7 +34,6 @@
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.Syncable;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
@@ -124,6 +123,7 @@
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
@@ -1771,6 +1771,25 @@ public List listKeys(String volumeName, String bucketName,
}
}
+ @Override
+ public List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
+ String startKeyName, String keyPrefix, int maxKeys) throws IOException {
+
+ Preconditions.checkNotNull(volumeName);
+ Preconditions.checkNotNull(bucketName);
+
+ return ozoneManagerClient.listTrash(volumeName, bucketName, startKeyName,
+ keyPrefix, maxKeys);
+ }
+
+ @Override
+ public boolean recoverTrash(String volumeName, String bucketName,
+ String keyName, String destinationBucket) throws IOException {
+
+ return ozoneManagerClient.recoverTrash(volumeName, bucketName, keyName,
+ destinationBucket);
+ }
+
@Override
public OzoneKeyDetails getKeyDetails(
String volumeName, String bucketName, String keyName)
@@ -2165,6 +2184,8 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName,
@Override
public void createDirectory(String volumeName, String bucketName,
String keyName) throws IOException {
+ verifyVolumeName(volumeName);
+ verifyBucketName(bucketName);
String ownerName = getRealUserInfo().getShortUserName();
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2316,16 +2337,9 @@ public List listStatusLight(String volumeName,
String bucketName, String keyName, boolean recursive, String startKey,
long numEntries, boolean allowPartialPrefixes) throws IOException {
OmKeyArgs keyArgs = prepareOmKeyArgs(volumeName, bucketName, keyName);
- if (omVersion.compareTo(OzoneManagerVersion.LIGHTWEIGHT_LIST_STATUS) >= 0) {
- return ozoneManagerClient.listStatusLight(keyArgs, recursive, startKey,
- numEntries, allowPartialPrefixes);
- } else {
- return ozoneManagerClient.listStatus(keyArgs, recursive, startKey,
- numEntries, allowPartialPrefixes)
- .stream()
- .map(OzoneFileStatusLight::fromOzoneFileStatus)
- .collect(Collectors.toList());
- }
+ return ozoneManagerClient
+ .listStatusLight(keyArgs, recursive, startKey, numEntries,
+ allowPartialPrefixes);
}
/**
@@ -2497,7 +2511,9 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey)
private OzoneOutputStream createOutputStream(OpenKeySession openKey,
KeyOutputStream keyOutputStream)
throws IOException {
- boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true);
+ boolean enableHsync = conf.getBoolean(
+ OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED,
+ OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT);
keyOutputStream
.addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(),
openKey.getOpenVersion());
@@ -2509,7 +2525,9 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey,
private OzoneOutputStream createSecureOutputStream(OpenKeySession openKey,
OutputStream keyOutputStream, Syncable syncable) throws IOException {
- boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true);
+ boolean enableHsync = conf.getBoolean(
+ OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED,
+ OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT);
final FileEncryptionInfo feInfo =
openKey.getKeyInfo().getFileEncryptionInfo();
if (feInfo != null) {
@@ -2590,27 +2608,17 @@ public OzoneFsServerDefaults getServerDefaults() throws IOException {
long now = Time.monotonicNow();
if ((serverDefaults == null) ||
(now - serverDefaultsLastUpdate > serverDefaultsValidityPeriod)) {
- try {
- for (ServiceInfo si : ozoneManagerClient.getServiceInfo()
- .getServiceInfoList()) {
- if (si.getServerDefaults() != null) {
- serverDefaults = si.getServerDefaults();
- serverDefaultsLastUpdate = now;
- break;
- }
- }
- } catch (Exception e) {
- LOG.warn("Could not get server defaults from OM.", e);
- }
+ serverDefaults = ozoneManagerClient.getServerDefaults();
+ serverDefaultsLastUpdate = now;
}
+ assert serverDefaults != null;
return serverDefaults;
}
@Override
public URI getKeyProviderUri() throws IOException {
- String keyProviderUri = (getServerDefaults() != null) ?
- serverDefaults.getKeyProviderUri() : null;
- return OzoneKMSUtil.getKeyProviderUri(ugi, null, keyProviderUri, conf);
+ return OzoneKMSUtil.getKeyProviderUri(ugi,
+ null, getServerDefaults().getKeyProviderUri(), conf);
}
@Override
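
The getServerDefaults() change above keeps a simple refresh-on-expiry cache: the value is refetched from the OM only when it is missing or older than the validity period. A generic sketch of that pattern, with a hypothetical class name and supplier:

// Minimal sketch of the refresh-on-expiry caching used by getServerDefaults().
final class CachedValue<T> {
  private final long validityMillis;
  private T value;
  private long lastUpdate;

  CachedValue(long validityMillis) {
    this.validityMillis = validityMillis;
  }

  synchronized T get(java.util.function.Supplier<T> fetcher) {
    long now = System.currentTimeMillis();
    if (value == null || now - lastUpdate > validityMillis) {
      value = fetcher.get();   // refetch only when missing or expired
      lastUpdate = now;
    }
    return value;
  }
}
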
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
index 361dcb1fd0a..5f2b80bdef6 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
@@ -71,8 +71,6 @@ private void init(boolean incrementalChunkList) throws IOException {
((InMemoryConfiguration)config).setFromObject(clientConfig);
- ((InMemoryConfiguration) config).setBoolean(
- OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
((InMemoryConfiguration) config).setBoolean(
OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
index c5985f82093..61ae0879f78 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
@@ -214,7 +214,7 @@ public String toString() {
}
/**
- * Get the volume and bucket or mount name (non-key path).
+ * Get the volume & bucket or mount name (non-key path).
* @return String of path excluding key in bucket.
*/
// Prepend a delimiter at beginning. e.g. /vol1/buc1
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index d6320061253..11f176362a6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -119,7 +119,7 @@ public static InetSocketAddress getOmAddress(ConfigurationSource conf) {
* Return list of OM addresses by service ids - when HA is enabled.
*
* @param conf {@link ConfigurationSource}
- * @return {service.id -> [{@link InetSocketAddress}]}
+ * @return {service.id -> [{@link InetSocketAddress}]}
*/
public static Map<String, List<InetSocketAddress>> getOmHAAddressesById(
ConfigurationSource conf) {
@@ -243,10 +243,6 @@ public static boolean isReadOnly(
case ListKeys:
case ListKeysLight:
case ListTrash:
- // ListTrash is deprecated by HDDS-11251. Keeping this in here
- // As protobuf currently doesn't support deprecating enum fields
- // TODO: Remove once migrated to proto3 and mark fields in proto
- // as deprecated
case ServiceList:
case ListOpenFiles:
case ListMultiPartUploadParts:
@@ -278,8 +274,7 @@ public static boolean isReadOnly(
case SetSafeMode:
case PrintCompactionLogDag:
case GetSnapshotInfo:
- case GetQuotaRepairStatus:
- case StartQuotaRepair:
+ case GetServerDefaults:
return true;
case CreateVolume:
case SetVolumeProperty:
@@ -309,10 +304,6 @@ public static boolean isReadOnly(
case AddAcl:
case PurgeKeys:
case RecoverTrash:
- // RecoverTrash is deprecated by HDDS-11251. Keeping this in here
- // As protobuf currently doesn't support deprecating enum fields
- // TODO: Remove once migrated to proto3 and mark fields in proto
- // as deprecated
case FinalizeUpgrade:
case Prepare:
case CancelPrepare:
@@ -332,7 +323,6 @@ public static boolean isReadOnly(
case DeleteSnapshot:
case RenameSnapshot:
case SnapshotMoveDeletedKeys:
- case SnapshotMoveTableKeys:
case SnapshotPurge:
case RecoverLease:
case SetTimes:
@@ -707,7 +697,7 @@ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyNa
* Look at 'ozone.om.internal.service.id' first. If configured, return that.
* If the above is not configured, look at 'ozone.om.service.ids'.
* If count(ozone.om.service.ids) == 1, return that id.
- * If count(ozone.om.service.ids) > 1 throw exception
+ * If count(ozone.om.service.ids) > 1 throw exception
* If 'ozone.om.service.ids' is not configured, return null. (Non HA)
* @param conf configuration
* @return OM service ID.
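
The Javadoc above spells out the OM service-id resolution order. A condensed sketch of that order follows; the helper name and exception type are assumptions, the config key literals are taken from the comment, and error handling is simplified.

// Sketch of the resolution order described above (illustrative only).
static String resolveOmServiceId(ConfigurationSource conf) {
  String internalId = conf.get("ozone.om.internal.service.id");
  if (internalId != null && !internalId.isEmpty()) {
    return internalId;                       // explicit internal service id wins
  }
  Collection<String> serviceIds =
      conf.getTrimmedStringCollection("ozone.om.service.ids");
  if (serviceIds.isEmpty()) {
    return null;                             // non-HA deployment
  }
  if (serviceIds.size() > 1) {
    throw new IllegalArgumentException("Multiple OM service ids configured, "
        + "set ozone.om.internal.service.id to pick one");
  }
  return serviceIds.iterator().next();       // exactly one id configured
}
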
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
index c7e20fb7e8b..8ffa3c45c09 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
@@ -27,7 +27,7 @@
/**
* An {@link OutputStream} first write data to a buffer up to the capacity.
- * Then, select {@code Underlying} by the number of bytes written.
+ * Then, select {@link Underlying} by the number of bytes written.
* When {@link #flush()}, {@link #hflush()}, {@link #hsync()}
* or {@link #close()} is invoked,
* it will force flushing the buffer and {@link OutputStream} selection.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index a77bc4f5304..0f3b55235be 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -73,9 +73,6 @@ private OMConfigKeys() {
public static final String OZONE_OM_DECOMMISSIONED_NODES_KEY =
"ozone.om.decommissioned.nodes";
- public static final String OZONE_OM_FEATURES_DISABLED =
- "ozone.om.features.disabled";
-
public static final String OZONE_OM_ADDRESS_KEY =
"ozone.om.address";
public static final String OZONE_OM_BIND_HOST_DEFAULT =
@@ -403,8 +400,6 @@ private OMConfigKeys() {
/**
* Configuration properties for Snapshot Directory Service.
*/
- public static final String OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED = "ozone.snapshot.deep.cleaning.enabled";
- public static final boolean OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT = false;
public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL =
"ozone.snapshot.directory.service.interval";
public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
index db00917dacc..ae238f1b45a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
@@ -80,7 +80,7 @@ T doUnderLock(String lockId, S3SecretFunction action)
/**
* Default implementation of secret check method.
* @param kerberosId kerberos principal.
- * @return true if exist associated s3 secret for given {@code kerberosId},
+ * @return true if exist associated s3 secret for given {@param kerberosId},
* false if not.
*/
default boolean hasS3Secret(String kerberosId) throws IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
index 8c3943d0fab..0bfd6922fee 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java
@@ -24,7 +24,7 @@
* Exception thrown by
* {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when
* OM leader is not ready to serve requests. This error is thrown when Raft
- * Server returns {@link org.apache.ratis.protocol.exceptions.LeaderNotReadyException}.
+ * Server returns {@link org.apache.ratis.protocol.LeaderNotReadyException}.
*/
public class OMLeaderNotReadyException extends IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 0507a27de61..f52a142239b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -360,6 +360,7 @@ public synchronized void appendNewBlocks(
* @param updateTime if true, updates modification time.
* @param keepOldVersions if false, old blocks won't be kept
* and the new block versions will always be 0
+ * @throws IOException
*/
public synchronized long addNewVersion(
List<OmKeyLocationInfo> newLocationList, boolean updateTime,
@@ -627,7 +628,7 @@ public OmKeyInfo build() {
/**
* For network transmit.
- * @return KeyInfo
+ * @return
*/
public KeyInfo getProtobuf(int clientVersion) {
return getProtobuf(false, clientVersion);
@@ -659,7 +660,7 @@ public KeyInfo getNetworkProtobuf(String fullKeyName, int clientVersion,
/**
*
* @param ignorePipeline true for persist to DB, false for network transmit.
- * @return KeyInfo
+ * @return
*/
public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
return getProtobuf(ignorePipeline, null, clientVersion, false);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index bf4ffa9d8de..74effbd80a3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -18,15 +18,10 @@
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import jakarta.annotation.Nonnull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.nio.file.Paths;
import java.util.UUID;
@@ -37,7 +32,6 @@
* Utility class for OzoneFileSystem.
*/
public final class OzoneFSUtils {
- static final Logger LOG = LoggerFactory.getLogger(OzoneFSUtils.class);
private OzoneFSUtils() { }
@@ -298,31 +292,4 @@ public static Path trimPathToDepth(Path path, int maxDepth) {
}
return res;
}
-
- /**
- * Helper method to return whether Hsync can be enabled.
- * And print warning when the config is ignored.
- */
- public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) {
- final String confKey = isClient ?
- "ozone.client.hbase.enhancements.allowed" :
- OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED;
-
- boolean confHBaseEnhancementsAllowed = conf.getBoolean(
- confKey, OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT);
-
- boolean confHsyncEnabled = conf.getBoolean(
- OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT);
-
- if (confHBaseEnhancementsAllowed) {
- return confHsyncEnabled;
- } else {
- if (confHsyncEnabled) {
- LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. To enable it, set {} = true as well.",
- OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true,
- confKey);
- }
- return false;
- }
- }
}
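
For reference, the gating rule implemented by the canEnableHsync helper removed above reduces to a conjunction of the two flags, with a warning when hsync is requested while HBase enhancements are disallowed. A condensed sketch using the same config keys (assumes a LOG field is in scope, as in the removed class):

// Condensed sketch of the removed gating rule (illustrative only).
boolean hbaseEnhancementsAllowed = conf.getBoolean(
    OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED,
    OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT);
boolean hsyncEnabled = conf.getBoolean(
    OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED,
    OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT);
if (!hbaseEnhancementsAllowed && hsyncEnabled) {
  LOG.warn("Ignoring {} because HBase enhancements are disallowed.",
      OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED);
}
boolean effectiveHsync = hbaseEnhancementsAllowed && hsyncEnabled;
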
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
index ed3d3ee25c2..6bab1025b13 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.ipc.CallerContext;
import org.apache.hadoop.ipc.IdentityProvider;
import org.apache.hadoop.ipc.Schedulable;
+import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,7 +43,7 @@ public OzoneIdentityProvider() {
}
/**
- * If schedulable isn't instance of {@link org.apache.hadoop.ipc.Server.Call},
+ * If schedulable isn't instance of {@link Server.Call},
* then trying to access getCallerContext() method, will
* result in an exception.
*
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
index f1dd1e9eeba..24c172ef8fd 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -34,7 +34,7 @@
/**
* Args for deleted keys. This is written to om metadata deletedTable.
* Once a key is deleted, it is moved to om metadata deletedTable. Having a
- * label: {@code List} ensures that if users create and delete keys with
+ * {label: List} ensures that if users create & delete keys with
* exact same uri multiple times, all the delete instances are bundled under
* the same key name. This is useful as part of GDPR compliance where an
* admin wants to confirm if a given key is deleted from deletedTable metadata.
@@ -110,7 +110,9 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
}
/**
- * @param compact true for persistence, false for network transmit
+ *
+ * @param compact, true for persistence, false for network transmit
+ * @return
*/
public RepeatedKeyInfo getProto(boolean compact, int clientVersion) {
List<KeyInfo> list = new ArrayList<>();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
index 5dbe3487e19..c8bdbf43c42 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -25,7 +25,6 @@
import java.util.Map;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.OzoneManagerVersion;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo;
@@ -60,7 +59,6 @@ public final class ServiceInfo {
private Map ports;
private OMRoleInfo omRoleInfo;
- private OzoneFsServerDefaults serverDefaults;
/**
* Default constructor for JSON deserialization.
@@ -78,24 +76,6 @@ private ServiceInfo(NodeType nodeType,
List<ServicePort> portList,
OzoneManagerVersion omVersion,
OMRoleInfo omRole) {
- this(nodeType, hostname, portList, omVersion, omRole, null);
- }
-
- /**
- * Constructs the ServiceInfo for the {@code nodeType}.
- * @param nodeType type of node/service
- * @param hostname hostname of the service
- * @param portList list of ports the service listens to
- * @param omVersion Om Version
- * @param omRole OM role Ino
- * @param keyProviderUri KMS provider URI
- */
- private ServiceInfo(NodeType nodeType,
- String hostname,
- List<ServicePort> portList,
- OzoneManagerVersion omVersion,
- OMRoleInfo omRole,
- OzoneFsServerDefaults serverDefaults) {
Preconditions.checkNotNull(nodeType);
Preconditions.checkNotNull(hostname);
this.nodeType = nodeType;
@@ -106,7 +86,6 @@ private ServiceInfo(NodeType nodeType,
ports.put(port.getType(), port.getValue());
}
this.omRoleInfo = omRole;
- this.serverDefaults = serverDefaults;
}
/**
@@ -164,15 +143,6 @@ public OMRoleInfo getOmRoleInfo() {
return omRoleInfo;
}
- /**
- * Returns the Ozone Server default configuration.
- * @return OmRoleInfo
- */
- @JsonIgnore
- public OzoneFsServerDefaults getServerDefaults() {
- return serverDefaults;
- }
-
/**
* Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo.
*
@@ -200,9 +170,6 @@ public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() {
if (nodeType == NodeType.OM && omRoleInfo != null) {
builder.setOmRole(omRoleInfo);
}
- if (serverDefaults != null) {
- builder.setServerDefaults(serverDefaults.getProtobuf());
- }
return builder.build();
}
@@ -218,9 +185,7 @@ public static ServiceInfo getFromProtobuf(
serviceInfo.getHostname(),
serviceInfo.getServicePortsList(),
OzoneManagerVersion.fromProtoValue(serviceInfo.getOMVersion()),
- serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null,
- serviceInfo.hasServerDefaults() ? OzoneFsServerDefaults.getFromProtobuf(
- serviceInfo.getServerDefaults()) : null);
+ serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null);
}
/**
@@ -241,7 +206,6 @@ public static class Builder {
private List<ServicePort> portList = new ArrayList<>();
private OMRoleInfo omRoleInfo;
private OzoneManagerVersion omVersion;
- private OzoneFsServerDefaults serverDefaults;
/**
* Gets the Om Client Protocol Version.
@@ -295,11 +259,6 @@ public Builder setOmRoleInfo(OMRoleInfo omRole) {
return this;
}
- public Builder setServerDefaults(OzoneFsServerDefaults defaults) {
- serverDefaults = defaults;
- return this;
- }
-
/**
* Builds and returns {@link ServiceInfo} with the set values.
* @return {@link ServiceInfo}
@@ -309,8 +268,7 @@ public ServiceInfo build() {
host,
portList,
omVersion,
- omRoleInfo,
- serverDefaults);
+ omRoleInfo);
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 7feefdb0b22..47a48c37e8e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -19,7 +19,6 @@
*/
import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.ByteString;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.CopyObject;
@@ -52,7 +51,7 @@
* Each snapshot created has an associated SnapshotInfo entry
* containing the snapshotId, snapshot path,
* snapshot checkpoint directory, previous snapshotId
- * for the snapshot path and global amongst other necessary fields.
+ * for the snapshot path & global amongst other necessary fields.
*/
public final class SnapshotInfo implements Auditable, CopyObject {
private static final Codec CODEC = new DelegatedCodec<>(
@@ -125,7 +124,6 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) {
private long exclusiveSize;
private long exclusiveReplicatedSize;
private boolean deepCleanedDeletedDir;
- private ByteString lastTransactionInfo;
private SnapshotInfo(Builder b) {
this.snapshotId = b.snapshotId;
@@ -147,7 +145,6 @@ private SnapshotInfo(Builder b) {
this.exclusiveSize = b.exclusiveSize;
this.exclusiveReplicatedSize = b.exclusiveReplicatedSize;
this.deepCleanedDeletedDir = b.deepCleanedDeletedDir;
- this.lastTransactionInfo = b.lastTransactionInfo;
}
public void setName(String name) {
@@ -264,15 +261,13 @@ public SnapshotInfo.Builder toBuilder() {
.setGlobalPreviousSnapshotId(globalPreviousSnapshotId)
.setSnapshotPath(snapshotPath)
.setCheckpointDir(checkpointDir)
- .setDbTxSequenceNumber(dbTxSequenceNumber)
.setDeepClean(deepClean)
.setSstFiltered(sstFiltered)
.setReferencedSize(referencedSize)
.setReferencedReplicatedSize(referencedReplicatedSize)
.setExclusiveSize(exclusiveSize)
.setExclusiveReplicatedSize(exclusiveReplicatedSize)
- .setDeepCleanedDeletedDir(deepCleanedDeletedDir)
- .setLastTransactionInfo(lastTransactionInfo);
+ .setDeepCleanedDeletedDir(deepCleanedDeletedDir);
}
/**
@@ -298,7 +293,6 @@ public static class Builder {
private long exclusiveSize;
private long exclusiveReplicatedSize;
private boolean deepCleanedDeletedDir;
- private ByteString lastTransactionInfo;
public Builder() {
// default values
@@ -417,11 +411,6 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) {
return this;
}
- public Builder setLastTransactionInfo(ByteString lastTransactionInfo) {
- this.lastTransactionInfo = lastTransactionInfo;
- return this;
- }
-
public SnapshotInfo build() {
Preconditions.checkNotNull(name);
return new SnapshotInfo(this);
@@ -456,10 +445,6 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() {
sib.setGlobalPreviousSnapshotID(toProtobuf(globalPreviousSnapshotId));
}
- if (lastTransactionInfo != null) {
- sib.setLastTransactionInfo(lastTransactionInfo);
- }
-
sib.setSnapshotPath(snapshotPath)
.setCheckpointDir(checkpointDir)
.setDbTxSequenceNumber(dbTxSequenceNumber)
@@ -528,10 +513,6 @@ public static SnapshotInfo getFromProtobuf(
snapshotInfoProto.getDeepCleanedDeletedDir());
}
- if (snapshotInfoProto.hasLastTransactionInfo()) {
- osib.setLastTransactionInfo(snapshotInfoProto.getLastTransactionInfo());
- }
-
osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath())
.setCheckpointDir(snapshotInfoProto.getCheckpointDir())
.setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber());
@@ -624,14 +605,6 @@ public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) {
this.deepCleanedDeletedDir = deepCleanedDeletedDir;
}
- public ByteString getLastTransactionInfo() {
- return lastTransactionInfo;
- }
-
- public void setLastTransactionInfo(ByteString lastTransactionInfo) {
- this.lastTransactionInfo = lastTransactionInfo;
- }
-
/**
* Generate default name of snapshot, (used if user doesn't provide one).
*/
@@ -700,8 +673,7 @@ public boolean equals(Object o) {
referencedReplicatedSize == that.referencedReplicatedSize &&
exclusiveSize == that.exclusiveSize &&
exclusiveReplicatedSize == that.exclusiveReplicatedSize &&
- deepCleanedDeletedDir == that.deepCleanedDeletedDir &&
- Objects.equals(lastTransactionInfo, that.lastTransactionInfo);
+ deepCleanedDeletedDir == that.deepCleanedDeletedDir;
}
@Override
@@ -712,7 +684,7 @@ public int hashCode() {
globalPreviousSnapshotId, snapshotPath, checkpointDir,
deepClean, sstFiltered,
referencedSize, referencedReplicatedSize,
- exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo);
+ exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir);
}
/**
@@ -720,7 +692,27 @@ public int hashCode() {
*/
@Override
public SnapshotInfo copyObject() {
- return this.toBuilder().build();
+ return new Builder()
+ .setSnapshotId(snapshotId)
+ .setName(name)
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setSnapshotStatus(snapshotStatus)
+ .setCreationTime(creationTime)
+ .setDeletionTime(deletionTime)
+ .setPathPreviousSnapshotId(pathPreviousSnapshotId)
+ .setGlobalPreviousSnapshotId(globalPreviousSnapshotId)
+ .setSnapshotPath(snapshotPath)
+ .setCheckpointDir(checkpointDir)
+ .setDbTxSequenceNumber(dbTxSequenceNumber)
+ .setDeepClean(deepClean)
+ .setSstFiltered(sstFiltered)
+ .setReferencedSize(referencedSize)
+ .setReferencedReplicatedSize(referencedReplicatedSize)
+ .setExclusiveSize(exclusiveSize)
+ .setExclusiveReplicatedSize(exclusiveReplicatedSize)
+ .setDeepCleanedDeletedDir(deepCleanedDeletedDir)
+ .build();
}
@Override
@@ -745,7 +737,6 @@ public String toString() {
", exclusiveSize: '" + exclusiveSize + '\'' +
", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' +
", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' +
- ", lastTransactionInfo: '" + lastTransactionInfo + '\'' +
'}';
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
index a715bfbc153..753d528cb05 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
@@ -57,7 +57,7 @@ public interface AccountNameSpace {
* Get Space Usage Information for this AccountNameSpace. This can be
* used for billing purpose. Such Aggregation can also be done lazily
* by a Recon job. Implementations can decide.
- * @return SpaceUsage
+ * @return
*/
SpaceUsageSource getSpaceUsage();
@@ -71,7 +71,7 @@ public interface AccountNameSpace {
/**
* Get Quota Information for this AccountNameSpace.
- * @return OzoneQuota
+ * @return
*/
OzoneQuota getQuota();
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
index d5ecf7bba80..1481f1b466b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
@@ -74,7 +74,7 @@ public interface BucketNameSpace {
* Get Space Usage Information for this BucketNameSpace. This can be
* used for billing purpose. Such Aggregation can also be done lazily
* by a Recon job. Implementations can decide.
- * @return SpaceUsageSource
+ * @return
*/
SpaceUsageSource getSpaceUsage();
@@ -88,7 +88,7 @@ public interface BucketNameSpace {
/**
* Get Quota Information for this BucketNameSpace.
- * @return OzoneQuota
+ * @return
*/
OzoneQuota getQuota();
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 94822630f8e..45922c107cb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.om.IOmMetadataReader;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -55,6 +56,7 @@
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
@@ -1053,6 +1055,39 @@ DBUpdates getDBUpdates(
OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest)
throws IOException;
+ /**
+ * List trash allows the user to list the keys that were marked as deleted,
+ * but not actually deleted by Ozone Manager. This allows a user to recover
+ * keys within a configurable window.
+ * @param volumeName - The volume name, which can also be a wild card
+ * using '*'.
+ * @param bucketName - The bucket name, which can also be a wild card
+ * using '*'.
+ * @param startKeyName - List keys from a specific key name.
+ * @param keyPrefix - List keys using a specific prefix.
+ * @param maxKeys - The number of keys to be returned. This must be below
+ * the cluster-level limit set by admins.
+ * @return The list of keys that are deleted from the deleted table.
+ * @throws IOException
+ */
+ List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
+ String startKeyName, String keyPrefix, int maxKeys) throws IOException;
+
+ /**
+ * Recover trash allows the user to recover keys that were marked as deleted,
+ * but not actually deleted by Ozone Manager.
+ * @param volumeName - The volume name.
+ * @param bucketName - The bucket name.
+ * @param keyName - The key the user wants to recover.
+ * @param destinationBucket - The bucket the user wants to recover to.
+ * @return True if the recover operation succeeded, false otherwise.
+ * @throws IOException
+ */
+ default boolean recoverTrash(String volumeName, String bucketName,
+ String keyName, String destinationBucket) throws IOException {
+ return false;
+ }
+
/**
*
* @param txnApplyWaitTimeoutSeconds Max time in SECONDS to wait for all
@@ -1061,7 +1096,7 @@ DBUpdates getDBUpdates(
* @param txnApplyCheckIntervalSeconds Time in SECONDS to wait between
* successive checks for all transactions
* to be applied to the OM DB.
- * @return {@code long}
+ * @return
*/
default long prepareOzoneManager(
long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds)
@@ -1146,15 +1181,10 @@ boolean setSafeMode(SafeModeAction action, boolean isChecked)
throws IOException;
/**
- * Get status of last triggered quota repair in OM.
- * @return String
- * @throws IOException
- */
- String getQuotaRepairStatus() throws IOException;
-
- /**
- * start quota repair in OM.
+ * Get server default configurations.
+ *
+ * @return OzoneFsServerDefaults some default configurations from server.
* @throws IOException
*/
- void startQuotaRepair(List<String> buckets) throws IOException;
+ OzoneFsServerDefaults getServerDefaults() throws IOException;
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index b140cf95e69..f70beed5f25 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.ipc.CallerContext;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
@@ -71,6 +72,7 @@
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
@@ -148,6 +150,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest;
@@ -178,6 +182,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest;
@@ -192,6 +198,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest;
@@ -2114,8 +2122,12 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
.setGetFileStatusRequest(req)
.build();
- final GetFileStatusResponse resp = handleError(submitRequest(omRequest))
- .getGetFileStatusResponse();
+ final GetFileStatusResponse resp;
+ try {
+ resp = handleError(submitRequest(omRequest)).getGetFileStatusResponse();
+ } catch (IOException e) {
+ throw e;
+ }
return OzoneFileStatus.getFromProtobuf(resp.getStatus());
}
@@ -2430,6 +2442,85 @@ public List listStatus(OmKeyArgs args, boolean recursive,
return listStatus(args, recursive, startKey, numEntries, false);
}
+ @Override
+ public List<RepeatedOmKeyInfo> listTrash(String volumeName,
+ String bucketName, String startKeyName, String keyPrefix, int maxKeys)
+ throws IOException {
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeName),
+ "The volume name cannot be null or " +
+ "empty. Please enter a valid volume name or use '*' as a wild card");
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(bucketName),
+ "The bucket name cannot be null or " +
+ "empty. Please enter a valid bucket name or use '*' as a wild card");
+
+ ListTrashRequest trashRequest = ListTrashRequest.newBuilder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setStartKeyName(startKeyName)
+ .setKeyPrefix(keyPrefix)
+ .setMaxKeys(maxKeys)
+ .build();
+
+ OMRequest omRequest = createOMRequest(Type.ListTrash)
+ .setListTrashRequest(trashRequest)
+ .build();
+
+ ListTrashResponse trashResponse =
+ handleError(submitRequest(omRequest)).getListTrashResponse();
+
+ List<RepeatedOmKeyInfo> deletedKeyList =
+ new ArrayList<>(trashResponse.getDeletedKeysCount());
+
+ List<RepeatedOmKeyInfo> list = new ArrayList<>();
+ for (OzoneManagerProtocolProtos.RepeatedKeyInfo
+ repeatedKeyInfo : trashResponse.getDeletedKeysList()) {
+ RepeatedOmKeyInfo fromProto =
+ RepeatedOmKeyInfo.getFromProto(repeatedKeyInfo);
+ list.add(fromProto);
+ }
+ deletedKeyList.addAll(list);
+
+ return deletedKeyList;
+ }
+
+ @Override
+ public boolean recoverTrash(String volumeName, String bucketName,
+ String keyName, String destinationBucket) throws IOException {
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeName),
+ "The volume name cannot be null or empty. " +
+ "Please enter a valid volume name.");
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(bucketName),
+ "The bucket name cannot be null or empty. " +
+ "Please enter a valid bucket name.");
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(keyName),
+ "The key name cannot be null or empty. " +
+ "Please enter a valid key name.");
+
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(destinationBucket),
+ "The destination bucket name cannot be null or empty. " +
+ "Please enter a valid destination bucket name.");
+
+ RecoverTrashRequest.Builder req = RecoverTrashRequest.newBuilder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setDestinationBucket(destinationBucket);
+
+ OMRequest omRequest = createOMRequest(Type.RecoverTrash)
+ .setRecoverTrashRequest(req)
+ .build();
+
+ RecoverTrashResponse recoverResponse =
+ handleError(submitRequest(omRequest)).getRecoverTrashResponse();
+
+ return recoverResponse.getResponse();
+ }
+
@Override
public long prepareOzoneManager(
long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds)
@@ -2557,27 +2648,19 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked)
}
@Override
- public String getQuotaRepairStatus() throws IOException {
- OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest quotaRepairStatusRequest =
- OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest.newBuilder()
- .build();
+ public OzoneFsServerDefaults getServerDefaults()
+ throws IOException {
+ ServerDefaultsRequest serverDefaultsRequest =
+ ServerDefaultsRequest.newBuilder().build();
- OMRequest omRequest = createOMRequest(Type.GetQuotaRepairStatus)
- .setGetQuotaRepairStatusRequest(quotaRepairStatusRequest).build();
+ OMRequest omRequest = createOMRequest(Type.GetServerDefaults)
+ .setServerDefaultsRequest(serverDefaultsRequest).build();
- OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusResponse
- = handleError(submitRequest(omRequest)).getGetQuotaRepairStatusResponse();
- return quotaRepairStatusResponse.getStatus();
- }
+ ServerDefaultsResponse serverDefaultsResponse =
+ handleError(submitRequest(omRequest)).getServerDefaultsResponse();
- @Override
- public void startQuotaRepair(List buckets) throws IOException {
- OzoneManagerProtocolProtos.StartQuotaRepairRequest startQuotaRepairRequest =
- OzoneManagerProtocolProtos.StartQuotaRepairRequest.newBuilder()
- .build();
- OMRequest omRequest = createOMRequest(Type.StartQuotaRepair)
- .setStartQuotaRepairRequest(startQuotaRepairRequest).build();
- handleError(submitRequest(omRequest));
+ return OzoneFsServerDefaults.getFromProtobuf(
+ serverDefaultsResponse.getServerDefaults());
}
private SafeMode toProtoBuf(SafeModeAction action) {
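
A note on the argument checks in the listTrash/recoverTrash additions above: Guava's Preconditions.checkArgument throws IllegalArgumentException when its condition evaluates to false, so a non-empty requirement has to be written with the negated test, as in this small sketch (the helper name is hypothetical):

// Minimal sketch of the Preconditions contract relied on above.
static void requireNonEmpty(String value, String what) {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(value),
      "The %s cannot be null or empty.", what);
}
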
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
index e28c9477f29..ccb2080a875 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
@@ -242,13 +242,11 @@ public static MD5MD5Crc32FileChecksumProto convert(
DataOutputBuffer buf = new DataOutputBuffer();
checksum.write(buf);
byte[] bytes = buf.getData();
- int bytesPerCRC;
- long crcPerBlock;
- try (DataInputBuffer buffer = new DataInputBuffer()) {
- buffer.reset(bytes, 0, bytes.length);
- bytesPerCRC = buffer.readInt();
- crcPerBlock = buffer.readLong();
- }
+ DataInputBuffer buffer = new DataInputBuffer();
+ buffer.reset(bytes, 0, bytes.length);
+ int bytesPerCRC = buffer.readInt();
+ long crcPerBlock = buffer.readLong();
+ buffer.close();
int offset = Integer.BYTES + Long.BYTES;
ByteString byteString = ByteString.copyFrom(
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
index abd4cd6f6d2..1f105a03ad4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
@@ -19,7 +19,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
/**
- * No-op implementation for {@link IAccessAuthorizer}, allows everything.
+ * Default implementation for {@link IAccessAuthorizer}.
* */
public class OzoneAccessAuthorizer implements IAccessAuthorizer {
@@ -35,9 +35,4 @@ public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
throws OMException {
return true;
}
-
- @Override
- public boolean isNative() {
- return true;
- }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index e1f1f3a8c1e..ca32c96855d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -24,7 +24,7 @@
/**
* Class representing an ozone object.
- * It can be a volume with non-null volumeName {@literal (bucketName=null & name=null)}
+ * It can be a volume with non-null volumeName (bucketName=null & name=null)
* or a bucket with non-null volumeName and bucketName (name=null)
* or a key with non-null volumeName, bucketName and key name
* (via getKeyName)
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java
index f8363af3751..84ad208cf93 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java
@@ -18,13 +18,8 @@
package org.apache.hadoop.ozone.om.helpers;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.CsvSource;
-import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -43,29 +38,4 @@ public void testPaths() {
assertFalse(OzoneFSUtils.isValidName("/a:/b"));
assertFalse(OzoneFSUtils.isValidName("/a//b"));
}
-
- /**
- * In these scenarios below, OzoneFSUtils.canEnableHsync() should return false:
- * 1. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = false
- * 2. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = true
- * 3. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = false
- *
- * The only case where OzoneFSUtils.canEnableHsync() would return true:
- * 4. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = true
- */
- @ParameterizedTest
- @CsvSource({"false,false,false,false", "false,false,true,false", "false,true,false,false", "true,true,true,false",
- "false,false,false,true", "false,false,true,true", "false,true,false,true", "true,true,true,true"})
- void testCanEnableHsync(boolean canEnableHsync,
- boolean hbaseEnhancementsEnabled, boolean fsHsyncEnabled,
- boolean isClient) {
- OzoneConfiguration conf = new OzoneConfiguration();
- final String confKey = isClient ?
- "ozone.client.hbase.enhancements.allowed" :
- OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED;
- conf.setBoolean(confKey, hbaseEnhancementsEnabled);
- conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, fsHsyncEnabled);
-
- assertEquals(canEnableHsync, OzoneFSUtils.canEnableHsync(conf, isClient));
- }
}
diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
index 3450b387393..3c97d3add76 100644
--- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
+++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
@@ -67,13 +67,6 @@
dev-support
true
- <excludes>
- <exclude>**/.classpath</exclude>
- <exclude>**/.project</exclude>
- <exclude>**/.settings</exclude>
- <exclude>**/*.iml</exclude>
- <exclude>**/target/**</exclude>
- </excludes>
hadoop-hdds
diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json
index 72325cba080..827e2f04e10 100644
--- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json
+++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json
@@ -1,34 +1,4 @@
{
- "__inputs": [
- {
- "label": "prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- }
- ],
- "__elements": {},
- "__requires": [
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "11.1.3"
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "1.0.0"
- },
- {
- "type": "panel",
- "id": "timeseries",
- "name": "Time series",
- "version": ""
- }
- ],
"annotations": {
"list": [
{
@@ -50,1763 +20,1284 @@
"liveNow": false,
"panels": [
{
- "collapsed": false,
+ "collapsed": true,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
- "id": 2,
- "panels": [],
- "title": "OM API Metrics",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 49,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
+ "id": 19,
+ "panels": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Rate of Key Reads ",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no. of keys",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
+ },
+ "overrides": []
},
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 48,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "timezone": [
- "browser"
- ],
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 0,
+ "y": 1
},
- "editorMode": "builder",
- "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time",
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Read Key Info Latency",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 9
- },
- "id": 53,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_estimate_num_keys",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_estimate_num_keys",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_keytable_estimate_num_keys",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
+ }
+ ],
+ "title": "Rocksdb metrics (no. of keys)",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Rate of Get Key Info ACL Checks",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "cache used",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
+ },
+ "overrides": []
},
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 9
- },
- "id": 52,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 8,
+ "y": 1
},
- "editorMode": "code",
- "expr": "om_performance_metrics_get_key_info_acl_check_latency_ns_avg_time",
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Get Key Info ACL check latency",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 17
- },
- "id": 51,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rate(om_performance_metrics_check_access_latency_ns_num_ops[1m])",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Rate of Check Access",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_block_cache_usage",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_block_cache_usage",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 17
- },
- "id": 50,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ ],
+ "title": "Rocksdb block cache usage metrics",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "om_performance_metrics_check_access_latency_ns_avg_time",
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "OM Check Access Latency",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no. of files",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
+ },
+ "overrides": []
},
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 25
- },
- "id": 55,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 16,
+ "y": 1
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rate(om_performance_metrics_get_key_info_resolve_bucket_latency_ns_num_ops[1m])",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Resolve Bucket Latency rate",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 25
- },
- "id": 54,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_num_files_at_level0",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_keytable_num_files_at_level0",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
+ }
+ ],
+ "title": "Rocksdb level0 metrics (num files)",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "om_performance_metrics_get_key_info_resolve_bucket_latency_ns_avg_time",
- "instant": false,
- "legendFormat": "{{hostname}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Resolve Bucket Latency for Get Key Info",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no. of keys",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
+ },
+ "overrides": []
},
- "unit": "ns"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 33
- },
- "id": 56,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 0,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Rocksdb no. of db key metrics",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "{__name__=~\"om_lock.*avg.*\"}",
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "OM Locking Metrics",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 41
- },
- "id": 19,
- "panels": [],
- "title": "OM Rocksdb Metrics",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "no. of keys",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 8,
+ "y": 10
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_cur_size_all_mem_tables",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_size_all_mem_tables",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
+ }
+ ],
+ "title": "Rocksdb mem table metrics (size)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 16,
+ "y": 10
+ },
+ "id": 11,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_estimate_table_readers_mem",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 0,
- "y": 42
- },
- "id": 8,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_estimate_num_keys",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_estimate_num_keys",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
+ ],
+ "title": "Rocksdb om db table readers mem metrics",
+ "type": "timeseries"
},
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_keytable_estimate_num_keys",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "C",
- "useBackend": false
- }
- ],
- "title": "Rocksdb metrics (no. of keys)",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "cache used",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 8,
- "y": 42
- },
- "id": 7,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
+ },
+ "overrides": []
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_block_cache_usage",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 9,
+ "w": 8,
+ "x": 0,
+ "y": 19
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_block_cache_usage",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "Rocksdb block cache usage metrics",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "no. of files",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 16,
- "y": 42
- },
- "id": 13,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_num_files_at_level0",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_keytable_num_files_at_level0",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "C",
- "useBackend": false
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_filetable_live_sst_files_size",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_keytable_live_sst_files_size",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
+ }
+ ],
+ "title": "Rocksdb live sst file size metrics",
+ "type": "timeseries"
}
],
- "title": "Rocksdb level0 metrics (num files)",
- "type": "timeseries"
+ "title": "OM Rocksdb Metrics",
+ "type": "row"
},
{
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "no. of keys",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
+ "collapsed": true,
"gridPos": {
- "h": 9,
- "w": 8,
+ "h": 1,
+ "w": 24,
"x": 0,
- "y": 51
- },
- "id": 6,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "y": 1
},
- "targets": [
+ "id": 20,
+ "panels": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "code",
- "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no. of ops",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 2
},
- "disableTextWrap": false,
- "editorMode": "code",
- "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "Rocksdb no. of db key metrics",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 16,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 8,
- "y": 51
- },
- "id": 10,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Ugi Metrics (no. of ops)",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "time (ns)",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ns"
+ },
+ "overrides": []
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_cur_size_all_mem_tables",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 2
+ },
+ "id": 15,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_size_all_mem_tables",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "C",
- "useBackend": false
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Ugi Metrics (avg. time)",
+ "type": "timeseries"
}
],
- "title": "Rocksdb mem table metrics (size)",
- "type": "timeseries"
+ "title": "OM Ugi Metrics",
+ "type": "row"
},
{
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
+ "collapsed": true,
"gridPos": {
- "h": 9,
- "w": 8,
- "x": 16,
- "y": 51
- },
- "id": 11,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 2
},
- "targets": [
+ "id": 2,
+ "panels": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_estimate_table_readers_mem",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no of keys",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 163
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "C",
- "useBackend": false
- }
- ],
- "title": "Rocksdb om db table readers mem metrics",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "id": 4,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "om_metrics_num_keys",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 0,
- "y": 60
- },
- "id": 12,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ ],
+ "title": "OM num key metrics",
+ "type": "timeseries"
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_filetable_live_sst_files_size",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "no. of ops",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_keytable_live_sst_files_size",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "B",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 163
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}",
- "range": true,
- "refId": "C",
- "useBackend": false
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "om_metrics_num_key_ops",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "OM num key ops metrics",
+ "type": "timeseries"
}
],
- "title": "Rocksdb live sst file size metrics",
- "type": "timeseries"
+ "title": "OM Num Key Metrics",
+ "type": "row"
},
{
"collapsed": true,
@@ -1814,9 +1305,9 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 69
+ "y": 3
},
- "id": 20,
+ "id": 21,
"panels": [
{
"datasource": {
@@ -1828,10 +1319,9 @@
"mode": "palette-classic"
},
"custom": {
- "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
- "axisLabel": "no. of ops",
+ "axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
@@ -1879,9 +1369,9 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 83
+ "y": 164
},
- "id": 16,
+ "id": 1,
"options": {
"legend": {
"calcs": [],
@@ -1897,13 +1387,13 @@
"targets": [
{
"datasource": {
- "ype": "prometheus"
+ "type": "prometheus"
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "rate(ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}[1m])",
+ "expr": "om_metrics_num_get_service_lists",
"fullMetaSearch": false,
- "includeNullMetadata": false,
+ "includeNullMetadata": true,
"instant": false,
"legendFormat": "{{__name__}}, {{hostname}}",
"range": true,
@@ -1911,9 +1401,23 @@
"useBackend": false
}
],
- "title": "Ugi Metrics (no. of ops)",
+ "title": "Get service lists metrics",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "OM Service Lists Metrics",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 4
+ },
+ "id": 22,
+ "panels": [
{
"datasource": {
"type": "prometheus"
@@ -1924,7 +1428,6 @@
"mode": "palette-classic"
},
"custom": {
- "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "time (ns)",
@@ -1975,10 +1478,10 @@
"gridPos": {
"h": 8,
"w": 8,
- "x": 8,
- "y": 83
+ "x": 0,
+ "y": 5
},
- "id": 15,
+ "id": 3,
"options": {
"legend": {
"calcs": [],
@@ -1998,7 +1501,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}",
+ "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
@@ -2008,138 +1511,137 @@
"useBackend": false
}
],
- "title": "Ugi Metrics (avg. time)",
+ "title": "Read key info (avg time) metrics",
"type": "timeseries"
}
],
- "title": "OM Ugi Metrics",
+ "title": "OM Read Key Info Metrics",
"type": "row"
},
{
- "collapsed": false,
+ "collapsed": true,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 70
+ "y": 5
},
"id": 23,
- "panels": [],
- "title": "OM Table Cache Metrics",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 71
- },
- "id": 14,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
+ "panels": [
{
"datasource": {
"type": "prometheus"
},
- "disableTextWrap": false,
- "editorMode": "code",
- "expr": "table_cache_metrics_hit_count{instance=~\".*:9875|.+9876\"}",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus"
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 174
+ },
+ "id": 14,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "disableTextWrap": false,
- "editorMode": "code",
- "expr": "table_cache_metrics_miss_count{instance=~\".*:9875|.+9876\"}",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "table_cache_metrics_hit_count{instance=~\".*:9875\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "table_cache_metrics_miss_count{instance=~\".*:9875\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Table cache metrics (count)",
+ "type": "timeseries"
}
],
- "title": "Table cache metrics (count)",
- "type": "timeseries"
+ "title": "OM Table Cache Metrics",
+ "type": "row"
},
{
"collapsed": true,
@@ -2147,7 +1649,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 79
+ "y": 6
},
"id": 9,
"panels": [
@@ -2212,7 +1714,7 @@
"h": 9,
"w": 8,
"x": 0,
- "y": 111
+ "y": 47
},
"id": 17,
"options": {
@@ -2355,7 +1857,7 @@
"h": 9,
"w": 8,
"x": 8,
- "y": 111
+ "y": 47
},
"id": 18,
"options": {
@@ -2448,7 +1950,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 80
+ "y": 7
},
"id": 24,
"panels": [
@@ -2512,7 +2014,7 @@
"h": 9,
"w": 8,
"x": 0,
- "y": 210
+ "y": 146
},
"id": 26,
"options": {
@@ -2623,7 +2125,7 @@
"h": 9,
"w": 8,
"x": 8,
- "y": 210
+ "y": 146
},
"id": 27,
"options": {
@@ -2668,7 +2170,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 81
+ "y": 8
},
"id": 25,
"panels": [
@@ -2732,7 +2234,7 @@
"h": 9,
"w": 8,
"x": 0,
- "y": 202
+ "y": 138
},
"id": 30,
"options": {
@@ -2777,7 +2279,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 82
+ "y": 10
},
"id": 29,
"panels": [
@@ -2841,7 +2343,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 107
+ "y": 43
},
"id": 36,
"options": {
@@ -2936,7 +2438,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 107
+ "y": 43
},
"id": 37,
"options": {
@@ -2981,7 +2483,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 83
+ "y": 11
},
"id": 38,
"panels": [
@@ -2995,7 +2497,6 @@
"mode": "palette-classic"
},
"custom": {
- "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "time (ns)",
@@ -3047,7 +2548,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 73
+ "y": 44
},
"id": 39,
"options": {
@@ -3108,7 +2609,6 @@
"mode": "palette-classic"
},
"custom": {
- "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "no. of ops",
@@ -3159,7 +2659,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 73
+ "y": 44
},
"id": 40,
"options": {
@@ -3204,7 +2704,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 84
+ "y": 12
},
"id": 42,
"panels": [
@@ -3268,7 +2768,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 85
+ "y": 21
},
"id": 41,
"options": {
@@ -3363,7 +2863,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 85
+ "y": 21
},
"id": 43,
"options": {
@@ -3458,7 +2958,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 85
+ "y": 21
},
"id": 44,
"options": {
@@ -3503,7 +3003,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 85
+ "y": 13
},
"id": 45,
"panels": [
@@ -3567,7 +3067,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 78
+ "y": 14
},
"id": 46,
"options": {
@@ -3662,7 +3162,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 78
+ "y": 14
},
"id": 47,
"options": {
@@ -3703,7 +3203,8 @@
}
],
"refresh": "",
- "schemaVersion": 39,
+ "schemaVersion": 38,
+ "style": "dark",
"tags": [],
"templating": {
"list": []
@@ -3715,7 +3216,6 @@
"timepicker": {},
"timezone": "",
"title": "Read Key Dashboard",
- "uid": "edu3g1mx0be2oc",
- "version": 29,
+ "version": 21,
"weekStart": ""
}
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
index e8032068465..554b22b5a39 100644
--- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -94,9 +94,10 @@ EOF
# Some tests are skipped due to known issues.
# - ITestS3AContractDistCp: HDDS-10616
+ # - ITestS3AContractGetFileStatusV1List: HDDS-10617
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
- -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \
+ -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \
clean test
local target="${RESULT_DIR}/junit/${bucket}/target"
diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config
index d3984110d8d..a5727d2b1e4 100644
--- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config
@@ -31,7 +31,6 @@ OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB
OZONE-SITE.XML_ozone.recon.address=recon:9891
OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
-OZONE-SITE.XML_ozone.om.features.disabled=ATOMIC_REWRITE_KEY
HADOOP_OPTS="-Dhadoop.opts=test"
HDFS_STORAGECONTAINERMANAGER_OPTS="-Dhdfs.scm.opts=test"
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index 38cc5b71a18..db517a7f7c6 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -33,7 +33,6 @@ OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3
OZONE-SITE.XML_ozone.om.ratis.enable=true
OZONE-SITE.XML_ozone.scm.service.ids=scmservice
-OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1
OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3
OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org
OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org
diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh
index 863e1d0b75a..cb76257cd8d 100755
--- a/hadoop-ozone/dist/src/main/compose/test-all.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-all.sh
@@ -33,7 +33,7 @@ source "$SCRIPT_DIR"/testlib.sh
if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then
java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.ozone.test.JacocoServer &
DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')
- export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*:org.apache.ozone.*:org.hadoop.ozone.*"
+ export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*"
fi
cd "$SCRIPT_DIR"
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
index a1b6da80c4b..909b72852aa 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
@@ -40,7 +40,6 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB
OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
-OZONE-SITE.XML_ozone.fs.hsync.enabled=true
# If SCM sends container close commands as part of upgrade finalization while
# datanodes are doing a leader election, all 3 replicas may end up in the
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config
index 88126ddf2cb..95ce6c0c9c5 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config
@@ -37,7 +37,6 @@ OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
-OZONE-SITE.XML_ozone.fs.hsync.enabled=true
OZONE_CONF_DIR=/etc/hadoop
OZONE_LOG_DIR=/var/log/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config
index 77fa2b40ee4..1b805c98960 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config
@@ -32,7 +32,6 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.scm.container.size=1GB
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
-OZONE-SITE.XML_ozone.fs.hsync.enabled=true
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
index d2718d04b7d..69af73f50c9 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
@@ -51,12 +51,12 @@ create_data_dirs() {
# be used.
## Else, a binary image will be used.
prepare_for_image() {
- local image_version="${1}"
+ local image_version="$1"
if [[ "$image_version" = "$OZONE_CURRENT_VERSION" ]]; then
prepare_for_runner_image
else
- prepare_for_binary_image "${image_version}-rocky"
+ prepare_for_binary_image "$image_version"
fi
}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh
deleted file mode 100644
index ec64d5dcd54..00000000000
--- a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source "$TEST_DIR"/testlib.sh
-
-with_this_version_pre_finalized() {
- # New layout features were added in this version, so OM and SCM should be pre-finalized.
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include pre-finalized upgrade/check-finalization.robot
- # Test that HSync is disabled when pre-finalized.
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" --include pre-finalized-hsync-tests hsync/upgrade-hsync-check.robot
-}
-
-with_this_version_finalized() {
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include finalized upgrade/check-finalization.robot
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" debug/ozone-debug-lease-recovery.robot
- execute_robot_test "$SCM" -N "${OUTPUT_NAME}-freon-hsync" freon/hsync.robot
-}
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env
index a673b7f4655..140975d4bd0 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/.env
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env
@@ -17,5 +17,3 @@
HDDS_VERSION=${hdds.version}
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
-HADOOP_VERSION=${hadoop.version}
-OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image}
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
index eda14353688..0bf0f619bd7 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
@@ -16,49 +16,39 @@
services:
old_client_1_0_0:
- image: apache/ozone:1.0.0-rocky
+ image: apache/ozone:1.0.0
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
command: ["sleep","1000000"]
old_client_1_1_0:
- image: apache/ozone:1.1.0-rocky
+ image: apache/ozone:1.1.0
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
command: ["sleep","1000000"]
old_client_1_2_1:
- image: apache/ozone:1.2.1-rocky
+ image: apache/ozone:1.2.1
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
command: ["sleep","1000000"]
old_client_1_3_0:
- image: apache/ozone:1.3.0-rocky
+ image: apache/ozone:1.3.0
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
command: ["sleep","1000000"]
old_client_1_4_0:
- image: apache/ozone:1.4.0-rocky
+ image: apache/ozone:1.4.0
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
command: ["sleep","1000000"]
new_client:
image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
@@ -66,8 +56,6 @@ services:
- docker-config
volumes:
- ../..:/opt/hadoop
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
environment:
OZONE_OPTS:
command: ["sleep","1000000"]
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
index 1a61aaf4f7e..85099f902d3 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=ofs://om
-CORE-SITE.XML_fs.trash.interval=1
CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem
OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
@@ -24,7 +22,6 @@ OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.om.address=om
OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.http-address=scm:9876
OZONE-SITE.XML_ozone.recon.address=recon:9891
OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
OZONE-SITE.XML_ozone.server.default.replication=3
@@ -34,98 +31,9 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB
OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
-OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m
OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
OZONE-SITE.XML_ozone.default.bucket.layout=LEGACY
OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
-
-OZONE-SITE.XML_hdds.block.token.enabled=true
-OZONE-SITE.XML_hdds.container.token.enabled=true
-OZONE-SITE.XML_hdds.grpc.tls.enabled=true
-
-OZONE-SITE.XML_ozone.security.enabled=true
-OZONE-SITE.XML_ozone.acl.enabled=true
-OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer
-OZONE-SITE.XML_ozone.administrators="testuser,recon,om"
-OZONE-SITE.XML_ozone.s3.administrators="testuser,recon,om"
-OZONE-SITE.XML_ozone.recon.administrators="testuser2"
-OZONE-SITE.XML_ozone.s3.administrators="testuser,s3g"
-
-HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
-HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-CORE-SITE.XML_dfs.data.transfer.protection=authentication
-CORE-SITE.XML_hadoop.security.authentication=kerberos
-CORE-SITE.XML_hadoop.security.auth_to_local="DEFAULT"
-CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms
-
-OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
-OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab
-OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM
-
-OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab
-OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM
-
-OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM
-
-HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM
-HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
-HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab
-HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM
-HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-
-OZONE-SITE.XML_ozone.security.http.kerberos.enabled=true
-OZONE-SITE.XML_ozone.s3g.secret.http.enabled=true
-OZONE-SITE.XML_ozone.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer
-
-OZONE-SITE.XML_ozone.om.http.auth.type=kerberos
-OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos
-OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.s3g.secret.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos
-OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos
-
-OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM
-OZONE-SITE.XML_hdds.scm.http.auth.kerberos.keytab=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.om.http.auth.kerberos.principal=HTTP/om@EXAMPLE.COM
-OZONE-SITE.XML_ozone.om.http.auth.kerberos.keytab=/etc/security/keytabs/om.keytab
-OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/dn@EXAMPLE.COM
-OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/dn.keytab
-OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.keytab
-OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
-OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
-OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=*
-OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab
-
-CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false
-CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret
-CORE-SITE.XML_hadoop.http.authentication.type=kerberos
-CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM
-CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-
-CORE-SITE.XML_hadoop.security.authorization=true
-HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
-HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
-HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=*
-
-KMS-SITE.XML_hadoop.kms.proxyuser.s3g.users=*
-KMS-SITE.XML_hadoop.kms.proxyuser.s3g.groups=*
-KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=*
-
-OZONE_DATANODE_SECURE_USER=root
-JSVC_HOME=/usr/bin
-
-OZONE_LOG_DIR=/var/log/hadoop
-
-no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1
+no_proxy=om,recon,scm,s3g,kdc,localhost,127.0.0.1
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf
deleted file mode 100644
index eefc5b9c685..00000000000
--- a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[logging]
-default = FILE:/var/log/krb5libs.log
-kdc = FILE:/var/log/krb5kdc.log
-admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- dns_canonicalize_hostname = false
- dns_lookup_realm = false
- ticket_lifetime = 24h
- renew_lifetime = 7d
- forwardable = true
- rdns = false
- default_realm = EXAMPLE.COM
-
-[realms]
- EXAMPLE.COM = {
- kdc = kdc
- admin_server = kdc
- max_renewable_life = 7d
- }
-
-[domain_realm]
- .example.com = EXAMPLE.COM
- example.com = EXAMPLE.COM
-
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
index 32059140ce9..6e3ff6cfbc9 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
@@ -18,39 +18,14 @@
x-new-config:
&new-config
image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
- dns_search: .
env_file:
- docker-config
volumes:
- ../..:/opt/hadoop
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
services:
- kdc:
- image: ${OZONE_TESTKRB5_IMAGE}
- hostname: kdc
- dns_search: .
- volumes:
- - ../..:/opt/hadoop
- - ../_keytabs:/etc/security/keytabs
- command: [ "krb5kdc","-n" ]
- kms:
- image: apache/hadoop:${HADOOP_VERSION}
- hostname: kms
- dns_search: .
- ports:
- - 9600:9600
- env_file:
- - ./docker-config
- environment:
- HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop
- volumes:
- - ../../libexec/transformation.py:/opt/transformation.py
- command: [ "hadoop", "kms" ]
datanode:
<<: *new-config
- hostname: dn
ports:
- 19864
- 9882
@@ -59,17 +34,15 @@ services:
command: ["ozone","datanode"]
om:
<<: *new-config
- hostname: om
environment:
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
- OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false
+ OZONE_OPTS:
ports:
- 9874:9874
- 9862:9862
command: ["ozone","om"]
recon:
<<: *new-config
- hostname: recon
ports:
- 9888:9888
environment:
@@ -77,7 +50,6 @@ services:
command: ["ozone","recon"]
s3g:
<<: *new-config
- hostname: s3g
environment:
OZONE_OPTS:
ports:
@@ -85,12 +57,9 @@ services:
command: ["ozone","s3g"]
scm:
<<: *new-config
- hostname: scm
ports:
- 9876:9876
- - 9860:9860
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
- OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}"
OZONE_OPTS:
command: ["ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml
index d1b6e56a084..c603bb51df3 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml
@@ -17,40 +17,15 @@
# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields)
x-old-config:
&old-config
- image: apache/ozone:${OZONE_VERSION}-rocky
- dns_search: .
+ image: apache/ozone:${OZONE_VERSION}
env_file:
- docker-config
volumes:
- ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- - ./krb5.conf:/etc/krb5.conf
services:
- kdc:
- image: ${OZONE_TESTKRB5_IMAGE}
- hostname: kdc
- dns_search: .
- volumes:
- - ../..:/opt/ozone
- - ../_keytabs:/etc/security/keytabs
- command: [ "krb5kdc","-n" ]
- kms:
- image: apache/hadoop:${HADOOP_VERSION}
- hostname: kms
- dns_search: .
- ports:
- - 9600:9600
- env_file:
- - ./docker-config
- environment:
- HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop
- volumes:
- - ../../libexec/transformation.py:/opt/transformation.py
- command: [ "hadoop", "kms" ]
datanode:
<<: *old-config
- hostname: dn
ports:
- 19864
- 9882
@@ -59,10 +34,8 @@ services:
command: ["ozone","datanode"]
om:
<<: *old-config
- hostname: om
environment:
ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
- OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false
HADOOP_OPTS:
ports:
- 9874:9874
@@ -70,7 +43,6 @@ services:
command: ["ozone","om"]
recon:
<<: *old-config
- hostname: recon
ports:
- 9888:9888
environment:
@@ -78,7 +50,6 @@ services:
command: ["ozone","recon"]
s3g:
<<: *old-config
- hostname: s3g
environment:
HADOOP_OPTS:
ports:
@@ -86,11 +57,9 @@ services:
command: ["ozone","s3g"]
scm:
<<: *old-config
- hostname: scm
ports:
- 9876:9876
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
- OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}"
HADOOP_OPTS:
command: ["ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index 8774cf2f632..695d8bf06ab 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -22,15 +22,11 @@ export COMPOSE_DIR
basename=$(basename ${COMPOSE_DIR})
current_version="${ozone.version}"
-# TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters
-old_versions="1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml
+old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml
# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh
source "${COMPOSE_DIR}/../testlib.sh"
-export SECURITY_ENABLED=true
-: ${OZONE_BUCKET_KEY_NAME:=key1}
-
old_client() {
OZONE_DIR=/opt/ozone
container=${client}
@@ -44,40 +40,24 @@ new_client() {
"$@"
}
-_kinit() {
- execute_command_in_container ${container} kinit -k -t /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM
-}
-
_init() {
- _kinit
execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup
}
_write() {
- _kinit
execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" -v SUFFIX:${client_version} compatibility/write.robot
}
_read() {
- _kinit
local data_version="$1"
execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" -v SUFFIX:${data_version} compatibility/read.robot
}
-test_bucket_encryption() {
- _kinit
- execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}" -v SUFFIX:${client_version} security/bucket-encryption.robot
-}
-
test_cross_compatibility() {
echo "Starting cluster with COMPOSE_FILE=${COMPOSE_FILE}"
OZONE_KEEP_RESULTS=true start_docker_env
- execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME}
- new_client test_bucket_encryption
-
- container=scm _kinit
execute_command_in_container scm ozone freon ockg -n1 -t1 -p warmup
new_client _write
new_client _read ${current_version}
@@ -85,8 +65,6 @@ test_cross_compatibility() {
for client_version in "$@"; do
client="old_client_${client_version//./_}"
- old_client test_bucket_encryption
-
old_client _write
old_client _read ${client_version}
@@ -101,8 +79,7 @@ test_ec_cross_compatibility() {
echo "Running Erasure Coded storage backward compatibility tests."
# local cluster_versions_with_ec="1.3.0 1.4.0 ${current_version}"
local cluster_versions_with_ec="${current_version}" # until HDDS-11334
- # TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters
- local non_ec_client_versions="1.1.0 1.2.1"
+ local non_ec_client_versions="1.0.0 1.1.0 1.2.1"
for cluster_version in ${cluster_versions_with_ec}; do
export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version}
@@ -125,14 +102,12 @@ test_ec_cross_compatibility() {
local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]')
OZONE_DIR=/opt/hadoop
- new_client _kinit
execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot
OZONE_DIR=/opt/ozone
for client_version in ${non_ec_client_versions}; do
client="old_client_${client_version//./_}"
unset OUTPUT_PATH
- container="${client}" _kinit
execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot
done
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index 9cb9202be0b..c28483c6735 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -407,7 +407,6 @@ Apache License 2.0
org.apache.ratis:ratis-proto
org.apache.ratis:ratis-server
org.apache.ratis:ratis-server-api
- org.apache.ratis:ratis-shell
org.apache.ratis:ratis-thirdparty-misc
org.apache.ratis:ratis-tools
org.apache.thrift:libthrift
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 1e07ec1a2c2..042c9380e4a 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -252,7 +252,6 @@ share/ozone/lib/ratis-netty.jar
share/ozone/lib/ratis-proto.jar
share/ozone/lib/ratis-server-api.jar
share/ozone/lib/ratis-server.jar
-share/ozone/lib/ratis-shell.jar
share/ozone/lib/ratis-thirdparty-misc.jar
share/ozone/lib/ratis-tools.jar
share/ozone/lib/re2j.jar
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot
index c3caec2ae91..dc862d59c1a 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot
@@ -25,8 +25,3 @@ Picks up command line options
${processes} = List All Processes
Should Contain ${processes} %{HDFS_OM_OPTS}
Should Contain ${processes} %{HADOOP_OPTS}
-
-Rejects Atomic Key Rewrite
- Execute ozone freon ockg -n1 -t1 -p rewrite
-    ${output} = Execute and checkrc ozone sh key rewrite -t EC -r rs-3-2-1024k /vol1/bucket1/rewrite/0 255
- Should Contain ${output} Feature disabled: ATOMIC_REWRITE_KEY
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
index 57715cda95f..511679c56f4 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
@@ -30,10 +30,6 @@ Key Can Be Read
Dir Can Be Listed
Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX}
-Dir Can Be Listed Using Shell
- ${result} = Execute ozone sh key list /vol1/bucket1
- Should Contain ${result} key-${SUFFIX}
-
File Can Be Get
Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/
Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX}
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot
deleted file mode 100644
index e006e154af1..00000000000
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot
+++ /dev/null
@@ -1,93 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation Test ozone debug ldb CLI
-Library OperatingSystem
-Resource ../lib/os.robot
-Test Timeout 5 minute
-Suite Setup Write keys
-
-*** Variables ***
-${PREFIX} ${EMPTY}
-${VOLUME} cli-debug-volume${PREFIX}
-${BUCKET} cli-debug-bucket
-${DEBUGKEY} debugKey
-${TESTFILE} testfile
-
-*** Keywords ***
-Write keys
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
- Execute ozone sh volume create ${VOLUME}
- Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE
- Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE} bs=100000 count=15
- Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE}
- Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE}
- Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE}
- Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3
-
-*** Test Cases ***
-Test ozone debug ldb ls
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db ls
- Should contain ${output} keyTable
-
-Test ozone debug ldb scan
- # test count option
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --count
- Should Not Be Equal ${output} 0
- # test valid json for scan command
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq -r '.'
- Should contain ${output} keyName
- Should contain ${output} testfile1
- Should contain ${output} testfile2
- Should contain ${output} testfile3
- # test startkey option
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --startkey="/cli-debug-volume/cli-debug-bucket/testfile2"
- Should not contain ${output} testfile1
- Should contain ${output} testfile2
- Should contain ${output} testfile3
- # test endkey option
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --endkey="/cli-debug-volume/cli-debug-bucket/testfile2"
- Should contain ${output} testfile1
- Should contain ${output} testfile2
- Should not contain ${output} testfile3
- # test fields option
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --fields="volumeName,bucketName,keyName"
- Should contain ${output} volumeName
- Should contain ${output} bucketName
- Should contain ${output} keyName
- Should not contain ${output} objectID
- Should not contain ${output} dataSize
- Should not contain ${output} keyLocationVersions
- # test filter option with one filter
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2"
- Should not contain ${output} testfile1
- Should contain ${output} testfile2
- Should not contain ${output} testfile3
- # test filter option with one multi-level filter
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest"
- Should not contain ${output} testfile1
- Should not contain ${output} testfile2
- Should contain ${output} testfile3
- # test filter option with multiple filter
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile3,acls.name:equals:systest"
- Should not contain ${output} testfile1
- Should not contain ${output} testfile2
- Should contain ${output} testfile3
- # test filter option with no records match both filters
- ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest,keyName:equals:testfile2"
- Should not contain ${output} testfile1
- Should not contain ${output} testfile2
- Should not contain ${output} testfile3
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot
index 691769dbd72..f867ee99f64 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot
@@ -17,13 +17,11 @@
Documentation Test lease recovery of ozone filesystem
Library OperatingSystem
Resource ../lib/os.robot
-Resource ../lib/fs.robot
Resource ozone-debug.robot
Test Timeout 5 minute
Suite Setup Create volume bucket and put key
*** Variables ***
-${OM_SERVICE_ID} %{OM_SERVICE_ID}
${VOLUME} lease-recovery-volume
${BUCKET} lease-recovery-bucket
${TESTFILE} testfile22
@@ -37,17 +35,13 @@ Create volume bucket and put key
*** Test Cases ***
Test ozone debug recover for o3fs
- ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${TESTFILE}
- ${result} = Execute Lease recovery cli ${o3fs_path}
- Should Contain ${result} Lease recovery SUCCEEDED
- ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} randomfile
- ${result} = Execute Lease recovery cli ${o3fs_path}
- Should Contain ${result} not found
+ ${result} = Execute Lease recovery cli o3fs://${BUCKET}.${VOLUME}.om/${TESTFILE}
+ Should Contain ${result} Lease recovery SUCCEEDED
+ ${result} = Execute Lease recovery cli o3fs://${BUCKET}.${VOLUME}.om/randomfile
+ Should Contain ${result} not found
Test ozone debug recover for ofs
- ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${TESTFILE}
- ${result} = Execute Lease recovery cli ${ofs_path}
- Should Contain ${result} Lease recovery SUCCEEDED
- ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} randomfile
- ${result} = Execute Lease recovery cli ${ofs_path}
- Should Contain ${result} not found
+ ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/${TESTFILE}
+ Should Contain ${result} Lease recovery SUCCEEDED
+ ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/randomfile
+ Should Contain ${result} not found
diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot b/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot
deleted file mode 100644
index c8462124427..00000000000
--- a/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation Test HSync via freon CLI.
-Library OperatingSystem
-Library String
-Library BuiltIn
-Resource ../ozone-lib/freon.robot
-Resource ../lib/fs.robot
-Test Timeout 10 minutes
-Suite Setup Create volume and bucket
-
-*** Variables ***
-${OM_SERVICE_ID} %{OM_SERVICE_ID}
-${VOLUME} hsync-volume
-${BUCKET} hsync-bucket
-
-*** Keywords ***
-Create volume and bucket
- Execute ozone sh volume create /${volume}
- Execute ozone sh bucket create /${volume}/${bucket}
-
-*** Test Cases ***
-Generate key for o3fs by HSYNC
- ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HSYNC path=${path}
-
-Generate key for o3fs by HFLUSH
- ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HFLUSH path=${path}
-
-Generate key for ofs by HSYNC
- ${path} = Format FS URL ofs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HSYNC path=${path}
-
-Generate key for ofs by HFLUSH
- ${path} = Format FS URL ofs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HFLUSH path=${path}
diff --git a/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot b/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot
deleted file mode 100644
index 1250ad1344e..00000000000
--- a/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Documentation Test HSync during upgrade
-Library OperatingSystem
-Library String
-Library BuiltIn
-Resource ../commonlib.robot
-Resource ../lib/fs.robot
-Resource ../debug/ozone-debug.robot
-Default Tags pre-finalized-hsync-tests
-Suite Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
-
-*** Variables ***
-${OM_SERVICE_ID} %{OM_SERVICE_ID}
-${VOLUME} upgrade-hsync-volume
-${BUCKET} upgrade-hsync-bucket
-${KEY} upgrade-hsync-key
-
-*** Keywords ***
-Create volume bucket and put key
- Execute ozone sh volume create /${volume}
- Execute ozone sh bucket create /${volume}/${bucket}
- Execute ozone sh key put /${volume}/${bucket}/${key} /etc/hosts
-
-Freon DFSG
- [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240
- ${result} = Execute and checkrc ozone freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} 255
- Should contain ${result} NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION
-
-*** Test Cases ***
-Test HSync lease recover prior to finalization
- Create volume bucket and put key
- ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${KEY}
- ${result} = Execute and checkrc ozone debug recover --path=${o3fs_path} 255
- Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7
- ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${KEY}
- ${result} = Execute and checkrc ozone debug recover --path=${ofs_path} 255
- Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7
-
-Generate key for o3fs by HSYNC prior to finalization
- ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HSYNC path=${path}
-
-Generate key for o3fs by HFLUSH prior to finalization
- ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HFLUSH path=${path}
-
-Generate key for ofs by HSYNC prior to finalization
- ${path} = Format FS URL ofs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HSYNC path=${path}
-
-Generate key for ofs by HFLUSH prior to finalization
- ${path} = Format FS URL ofs ${VOLUME} ${BUCKET}
- Freon DFSG sync=HFLUSH path=${path}
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot
index b813c9ed411..8d10cc81e90 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot
@@ -67,9 +67,3 @@ Freon OMBR
[arguments] ${prefix}=ombg ${n}=1 ${threads}=1 ${args}=${EMPTY}
${result} = Execute ozone freon ombr ${OM_HA_PARAM} -t ${threads} -n${n} -p ${prefix} ${args}
Should contain ${result} Successful executions: ${n}
-
-Freon DFSG
- [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${threads}=1 ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240 ${args}=${EMPTY}
- ${result} = Execute ozone freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} -t ${threads} ${args}
- Should contain ${result} Successful executions: ${n}
-
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot
index 651cda016f2..22805efcb1b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot
@@ -56,11 +56,3 @@ Compare Key With Local File with Different File
Compare Key With Local File if File Does Not Exist
${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file
Should Be Equal ${matches} ${FALSE}
-
-Rejects Put Key With Zero Expected Generation
- ${output} = Execute and checkrc ozone sh key put --expectedGeneration 0 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255
- Should Contain ${output} must be positive
-
-Rejects Put Key With Negative Expected Generation
- ${output} = Execute and checkrc ozone sh key put --expectedGeneration -1 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255
- Should Contain ${output} must be positive
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index d62a217e606..dd06d55f75f 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -107,11 +107,6 @@ Test Multipart Upload Complete
${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}'
Should Be Equal As Strings ${eTag2} ${part2Md5Sum}
-#complete multipart upload without any parts
- ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255
- Should contain ${result} InvalidRequest
- Should contain ${result} must specify at least one part
-
#complete multipart upload
${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
Should contain ${result} ${BUCKET}
diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone
index 0d005b3bd78..22ceed9ed3c 100755
--- a/hadoop-ozone/dist/src/shell/ozone/ozone
+++ b/hadoop-ozone/dist/src/shell/ozone/ozone
@@ -61,7 +61,6 @@ function ozone_usage
ozone_add_subcommand "debug" client "Ozone debug tool"
ozone_add_subcommand "repair" client "Ozone repair tool"
ozone_add_subcommand "checknative" client "checks if native libraries are loaded"
- ozone_add_subcommand "ratis" client "Ozone ratis tool"
ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false
}
@@ -232,10 +231,6 @@ function ozonecmd_case
OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative
OZONE_RUN_ARTIFACT_NAME="ozone-tools"
;;
- ratis)
- OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneRatis
- OZONE_RUN_ARTIFACT_NAME="ozone-tools"
- ;;
*)
OZONE_CLASSNAME="${subcmd}"
if ! ozone_validate_classname "${OZONE_CLASSNAME}"; then
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index 69242d2b1f0..78b67f99f1e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -187,8 +187,6 @@ void init() throws Exception {
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
conf.setBoolean(OZONE_ACL_ENABLED, true);
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s");
if (!bucketLayout.equals(FILE_SYSTEM_OPTIMIZED)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index cfc9029019a..32a785a95a9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -237,8 +237,6 @@ void initClusterAndEnv() throws IOException, InterruptedException, TimeoutExcept
conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2);
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s");
if (bucketLayout == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
index 8d161dedeb3..0abfb133654 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -18,11 +18,7 @@
package org.apache.hadoop.fs.ozone;
-import java.util.List;
-import java.util.Random;
import java.util.concurrent.CompletableFuture;
-
-import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -36,16 +32,10 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OmSnapshotManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
@@ -58,16 +48,12 @@
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.mockito.ArgumentMatchers;
-import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,8 +64,6 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -88,12 +72,6 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.when;
/**
* Directory deletion service test cases.
@@ -119,7 +97,6 @@ public static void init() throws Exception {
conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
- conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS);
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
conf.setBoolean(OZONE_ACL_ENABLED, true);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -483,123 +460,6 @@ public void testDeleteFilesAndSubFiles() throws Exception {
assertEquals(prevDeletedKeyCount + 5, currentDeletedKeyCount);
}
- private void createFileKey(OzoneBucket bucket, String key)
- throws Exception {
- byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
- OzoneOutputStream fileKey = bucket.createKey(key, value.length);
- fileKey.write(value);
- fileKey.close();
- }
-
- /*
- * Create key d1/k1
- * Create snap1
- * Rename dir1 to dir2
- * Delete dir2
- * Wait for KeyDeletingService to start processing deleted key k2
- * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed
- * Resume KeyDeletingService thread.
- * Read d1 from snap1.
- */
- @Test
- public void testAOSKeyDeletingWithSnapshotCreateParallelExecution()
- throws Exception {
- OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager();
- Table<String, SnapshotInfo> snapshotInfoTable = omMetadataManager.getSnapshotInfoTable();
- Table<String, OmKeyInfo> deletedDirTable = omMetadataManager.getDeletedDirTable();
- Table<String, String> renameTable = omMetadataManager.getSnapshotRenamedTable();
- cluster.getOzoneManager().getKeyManager().getSnapshotDeletingService().shutdown();
- DirectoryDeletingService dirDeletingService = cluster.getOzoneManager().getKeyManager().getDirDeletingService();
- // Suspend KeyDeletingService
- dirDeletingService.suspend();
- GenericTestUtils.waitFor(() -> !dirDeletingService.isRunningOnAOS(), 1000, 10000);
- Random random = new Random();
- final String testVolumeName = "volume" + random.nextInt();
- final String testBucketName = "bucket" + random.nextInt();
- // Create Volume and Buckets
- ObjectStore store = client.getObjectStore();
- store.createVolume(testVolumeName);
- OzoneVolume volume = store.getVolume(testVolumeName);
- volume.createBucket(testBucketName,
- BucketArgs.newBuilder().setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build());
- OzoneBucket bucket = volume.getBucket(testBucketName);
-
- OzoneManager ozoneManager = Mockito.spy(cluster.getOzoneManager());
- OmSnapshotManager omSnapshotManager = Mockito.spy(ozoneManager.getOmSnapshotManager());
- when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> omSnapshotManager);
- DirectoryDeletingService service = Mockito.spy(new DirectoryDeletingService(1000, TimeUnit.MILLISECONDS, 1000,
- ozoneManager,
- cluster.getConf()));
- service.shutdown();
- final int initialSnapshotCount =
- (int) cluster.getOzoneManager().getMetadataManager().countRowsInTable(snapshotInfoTable);
- final int initialDeletedCount = (int) omMetadataManager.countRowsInTable(deletedDirTable);
- final int initialRenameCount = (int) omMetadataManager.countRowsInTable(renameTable);
- String snap1 = "snap1";
- String snap2 = "snap2";
- createFileKey(bucket, "dir1/key1");
- store.createSnapshot(testVolumeName, testBucketName, "snap1");
- bucket.renameKey("dir1", "dir2");
- OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
- .setVolumeName(testVolumeName)
- .setBucketName(testBucketName)
- .setKeyName("dir2").build();
- long objectId = store.getClientProxy().getOzoneManagerClient().getKeyInfo(omKeyArgs, false)
- .getKeyInfo().getObjectID();
- long volumeId = omMetadataManager.getVolumeId(testVolumeName);
- long bucketId = omMetadataManager.getBucketId(testVolumeName, testBucketName);
- String deletePathKey = omMetadataManager.getOzoneDeletePathKey(objectId,
- omMetadataManager.getOzonePathKey(volumeId,
- bucketId, bucketId, "dir2"));
- bucket.deleteDirectory("dir2", true);
-
-
- assertTableRowCount(deletedDirTable, initialDeletedCount + 1);
- assertTableRowCount(renameTable, initialRenameCount + 1);
- Mockito.doAnswer(i -> {
- List<OzoneManagerProtocolProtos.PurgePathRequest> purgePathRequestList = i.getArgument(5);
- for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest : purgePathRequestList) {
- Assertions.assertNotEquals(deletePathKey, purgeRequest.getDeletedDir());
- }
- return i.callRealMethod();
- }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(),
- anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any());
-
- Mockito.doAnswer(i -> {
- store.createSnapshot(testVolumeName, testBucketName, snap2);
- GenericTestUtils.waitFor(() -> {
- try {
- SnapshotInfo snapshotInfo = store.getClientProxy().getOzoneManagerClient()
- .getSnapshotInfo(testVolumeName, testBucketName, snap2);
-
- return OmSnapshotManager.areSnapshotChangesFlushedToDB(cluster.getOzoneManager().getMetadataManager(),
- snapshotInfo);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }, 1000, 100000);
- GenericTestUtils.waitFor(() -> {
- try {
- return renameTable.get(omMetadataManager.getRenameKey(testVolumeName, testBucketName, objectId)) == null;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }, 1000, 10000);
- return i.callRealMethod();
- }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(testVolumeName), ArgumentMatchers.eq(testBucketName),
- ArgumentMatchers.eq(snap1));
- assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1);
- service.runPeriodicalTaskNow();
- service.runPeriodicalTaskNow();
- assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2);
- store.deleteSnapshot(testVolumeName, testBucketName, snap2);
- service.runPeriodicalTaskNow();
- store.deleteSnapshot(testVolumeName, testBucketName, snap1);
- cluster.restartOzoneManager();
- assertTableRowCount(cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(), initialSnapshotCount);
- dirDeletingService.resume();
- }
-
@Test
public void testDirDeletedTableCleanUpForSnapshot() throws Exception {
Table<String, OmKeyInfo> deletedDirTable =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index c39e24571a8..49b515d53c5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -70,7 +70,6 @@
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.ozone.ClientConfigForTesting;
-import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -84,9 +83,7 @@
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.ozone.container.keyvalue.impl.AbstractTestChunkManager;
import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -96,7 +93,6 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService;
import org.apache.hadoop.security.UserGroupInformation;
@@ -181,8 +177,6 @@ public static void init() throws Exception {
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
- CONF.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- CONF.setBoolean("ozone.client.hbase.enhancements.allowed", true);
CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
// Reduce KeyDeletingService interval
@@ -349,8 +343,6 @@ public void testEmptyHsync() throws Exception {
}
@Test
- // Making this the second test to be run to avoid lingering block files from previous tests
- @Order(2)
public void testKeyHSyncThenClose() throws Exception {
// Check that deletedTable should not have keys with the same block as in
// keyTable's when a key is hsync()'ed then close()'d.
@@ -366,16 +358,10 @@ public void testKeyHSyncThenClose() throws Exception {
String data = "random data";
final Path file = new Path(dir, "file-hsync-then-close");
try (FileSystem fs = FileSystem.get(CONF)) {
- String chunkPath;
try (FSDataOutputStream outputStream = fs.create(file, true)) {
outputStream.write(data.getBytes(UTF_8), 0, data.length());
outputStream.hsync();
- // locate the container chunk path on the first DataNode.
- chunkPath = getChunkPathOnDataNode(outputStream);
- assertFalse(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath));
}
- // After close, the chunk file should be closed.
- assertTrue(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath));
}
OzoneManager ozoneManager = cluster.getOzoneManager();
@@ -401,22 +387,6 @@ public void testKeyHSyncThenClose() throws Exception {
}
}
- private static String getChunkPathOnDataNode(FSDataOutputStream outputStream)
- throws IOException {
- String chunkPath;
- KeyOutputStream groupOutputStream =
- ((OzoneFSOutputStream) outputStream.getWrappedStream()).getWrappedOutputStream().getKeyOutputStream();
- List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
- OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
- HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
- chunkPath = dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID()).
- getContainerData().getChunksPath();
- return chunkPath;
- }
-
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testO3fsHSync(boolean incrementalChunkList) throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java
index 624b5e02c14..917ce57fe7d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java
@@ -107,8 +107,6 @@ public void init() throws Exception {
conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
// Reduce KeyDeletingService interval
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
index 6a3a0eb5b67..a4a9bcff470 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
@@ -120,8 +120,6 @@ public void init() throws IOException, InterruptedException,
final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED;
conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
index bce96251873..b79c9a870e4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
@@ -47,7 +47,6 @@
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
import static org.assertj.core.api.Assumptions.assumeThat;
@@ -94,8 +93,6 @@ protected static OzoneConfiguration createBaseConfiguration() {
conf.addResource(CONTRACT_XML);
- conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
return conf;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
index 4a9efceeb7b..75d860d951b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -61,15 +62,16 @@ public void init()
*
* @throws Exception
*/
+ @Flaky("HDDS-11359")
@Test
public void testPipelineInfo() throws Exception {
ObjectName bean = new ObjectName(
"Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo");
+ Map<String, Integer> pipelineStateCount = cluster
+ .getStorageContainerManager().getPipelineManager().getPipelineInfo();
GenericTestUtils.waitFor(() -> {
try {
- Map<String, Integer> pipelineStateCount = cluster
- .getStorageContainerManager().getPipelineManager().getPipelineInfo();
final TabularData data = (TabularData) mbs.getAttribute(
bean, "PipelineInfo");
for (Map.Entry<String, Integer> entry : pipelineStateCount.entrySet()) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index 6f79839cd02..c274d8fea30 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -61,7 +61,6 @@
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.SecretKeyTestClient;
-import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
import org.apache.hadoop.ozone.client.io.InsufficientLocationsException;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -84,7 +83,6 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.IOException;
@@ -101,7 +99,6 @@
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -120,7 +117,6 @@
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.params.provider.Arguments.arguments;
/**
* This class tests container commands on EC containers.
@@ -617,33 +613,30 @@ private static byte[] getBytesWith(int singleDigitNumber, int total) {
@ParameterizedTest
@MethodSource("recoverableMissingIndexes")
- void testECReconstructionCoordinatorWith(List<Integer> missingIndexes, boolean triggerRetry)
+ void testECReconstructionCoordinatorWith(List<Integer> missingIndexes)
throws Exception {
- testECReconstructionCoordinator(missingIndexes, 3, triggerRetry);
+ testECReconstructionCoordinator(missingIndexes, 3);
}
@ParameterizedTest
@MethodSource("recoverableMissingIndexes")
- void testECReconstructionCoordinatorWithPartialStripe(List<Integer> missingIndexes,
- boolean triggerRetry) throws Exception {
- testECReconstructionCoordinator(missingIndexes, 1, triggerRetry);
+ void testECReconstructionCoordinatorWithPartialStripe(List<Integer> missingIndexes)
+ throws Exception {
+ testECReconstructionCoordinator(missingIndexes, 1);
}
@ParameterizedTest
@MethodSource("recoverableMissingIndexes")
- void testECReconstructionCoordinatorWithFullAndPartialStripe(List<Integer> missingIndexes,
- boolean triggerRetry) throws Exception {
- testECReconstructionCoordinator(missingIndexes, 4, triggerRetry);
+ void testECReconstructionCoordinatorWithFullAndPartialStripe(List<Integer> missingIndexes)
+ throws Exception {
+ testECReconstructionCoordinator(missingIndexes, 4);
}
- static Stream<Arguments> recoverableMissingIndexes() {
- Stream<Arguments> args = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), true));
- Stream<Arguments> args1 = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), false));
- Stream<Arguments> args2 = Stream.of(arguments(ImmutableList.of(2, 3), true),
- arguments(ImmutableList.of(2, 4), true), arguments(ImmutableList.of(3, 5), true));
- Stream<Arguments> args3 = Stream.of(arguments(ImmutableList.of(2, 3), false),
- arguments(ImmutableList.of(2, 4), false), arguments(ImmutableList.of(3, 5), false));
- return Stream.concat(Stream.concat(args, args1), Stream.concat(args2, args3));
+ static Stream<List<Integer>> recoverableMissingIndexes() {
+ return Stream
+ .concat(IntStream.rangeClosed(1, 5).mapToObj(ImmutableList::of), Stream
+ .of(ImmutableList.of(2, 3), ImmutableList.of(2, 4),
+ ImmutableList.of(3, 5), ImmutableList.of(4, 5)));
}
/**
@@ -654,7 +647,7 @@ static Stream<Arguments> recoverableMissingIndexes() {
public void testECReconstructionCoordinatorWithMissingIndexes135() {
InsufficientLocationsException exception =
assertThrows(InsufficientLocationsException.class, () -> {
- testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3, false);
+ testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3);
});
String expectedMessage =
@@ -665,7 +658,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() {
}
private void testECReconstructionCoordinator(List<Integer> missingIndexes,
- int numInputChunks, boolean triggerRetry) throws Exception {
+ int numInputChunks) throws Exception {
ObjectStore objectStore = rpcClient.getObjectStore();
String keyString = UUID.randomUUID().toString();
String volumeName = UUID.randomUUID().toString();
@@ -674,7 +667,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
objectStore.getVolume(volumeName).createBucket(bucketName);
OzoneVolume volume = objectStore.getVolume(volumeName);
OzoneBucket bucket = volume.getBucket(bucketName);
- createKeyAndWriteData(keyString, bucket, numInputChunks, triggerRetry);
+ createKeyAndWriteData(keyString, bucket, numInputChunks);
try (
XceiverClientManager xceiverClientManager =
@@ -786,7 +779,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
.getReplicationConfig(), cToken);
assertEquals(blockDataArrList.get(i).length,
reconstructedBlockData.length);
- checkBlockDataWithRetry(blockDataArrList.get(i), reconstructedBlockData, triggerRetry);
+ checkBlockData(blockDataArrList.get(i), reconstructedBlockData);
XceiverClientSpi client = xceiverClientManager.acquireClient(
newTargetPipeline);
try {
@@ -807,7 +800,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
}
private void createKeyAndWriteData(String keyString, OzoneBucket bucket,
- int numChunks, boolean triggerRetry) throws IOException {
+ int numChunks) throws IOException {
for (int i = 0; i < numChunks; i++) {
inputChunks[i] = getBytesWith(i + 1, EC_CHUNK_SIZE);
}
@@ -816,48 +809,11 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket,
new HashMap<>())) {
assertInstanceOf(KeyOutputStream.class, out.getOutputStream());
for (int i = 0; i < numChunks; i++) {
- // We generally wait until the data is written to the last chunk
- // before attempting to trigger CloseContainer.
- // We use an asynchronous approach for this trigger,
- // aiming to ensure that closing the container does not interfere with the write operation.
- // However, this process often needs to be executed multiple times before it takes effect.
- if (i == numChunks - 1 && triggerRetry) {
- triggerRetryByCloseContainer(out);
- }
out.write(inputChunks[i]);
}
}
}
- private void triggerRetryByCloseContainer(OzoneOutputStream out) {
- CompletableFuture.runAsync(() -> {
- BlockOutputStreamEntry blockOutputStreamEntry = out.getKeyOutputStream().getStreamEntries().get(0);
- BlockID entryBlockID = blockOutputStreamEntry.getBlockID();
- long entryContainerID = entryBlockID.getContainerID();
- Pipeline entryPipeline = blockOutputStreamEntry.getPipeline();
- Map<DatanodeDetails, Integer> replicaIndexes = entryPipeline.getReplicaIndexes();
- try {
- for (Map.Entry<DatanodeDetails, Integer> entry : replicaIndexes.entrySet()) {
- DatanodeDetails key = entry.getKey();
- Integer value = entry.getValue();
- XceiverClientManager xceiverClientManager = new XceiverClientManager(config);
- Token<ContainerTokenIdentifier> cToken = containerTokenGenerator
- .generateToken(ANY_USER, ContainerID.valueOf(entryContainerID));
- XceiverClientSpi client = xceiverClientManager.acquireClient(
- createSingleNodePipeline(entryPipeline, key, value));
- try {
- ContainerProtocolCalls.closeContainer(client, entryContainerID, cToken.encodeToUrlString());
- } finally {
- xceiverClientManager.releaseClient(client, false);
- }
- break;
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
- }
-
@Test
public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure()
throws Exception {
@@ -870,7 +826,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure()
objectStore.getVolume(volumeName).createBucket(bucketName);
OzoneVolume volume = objectStore.getVolume(volumeName);
OzoneBucket bucket = volume.getBucket(bucketName);
- createKeyAndWriteData(keyString, bucket, 3, false);
+ createKeyAndWriteData(keyString, bucket, 3);
OzoneKeyDetails key = bucket.getKey(keyString);
long conID = key.getOzoneKeyLocations().get(0).getContainerID();
@@ -944,25 +900,6 @@ private void closeContainer(long conID)
HddsProtos.LifeCycleEvent.CLOSE);
}
- private void checkBlockDataWithRetry(
- org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData,
- org.apache.hadoop.ozone.container.common.helpers.BlockData[]
- reconstructedBlockData, boolean triggerRetry) {
- if (triggerRetry) {
- for (int i = 0; i < reconstructedBlockData.length; i++) {
- assertEquals(blockData[i].getBlockID(), reconstructedBlockData[i].getBlockID());
- List<ContainerProtos.ChunkInfo> oldBlockDataChunks = blockData[i].getChunks();
- List<ContainerProtos.ChunkInfo> newBlockDataChunks = reconstructedBlockData[i].getChunks();
- for (int j = 0; j < newBlockDataChunks.size(); j++) {
- ContainerProtos.ChunkInfo chunkInfo = oldBlockDataChunks.get(j);
- assertEquals(chunkInfo, newBlockDataChunks.get(j));
- }
- }
- return;
- }
- checkBlockData(blockData, reconstructedBlockData);
- }
-
private void checkBlockData(
org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData,
org.apache.hadoop.ozone.container.common.helpers.BlockData[]
@@ -1030,7 +967,8 @@ public static void prepareData(int[][] ranges) throws Exception {
out.write(values[i]);
}
}
-
+// List containerIDs =
+// new ArrayList<>(scm.getContainerManager().getContainerIDs());
List containerIDs =
scm.getContainerManager().getContainers()
.stream()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 8a219514d34..cc1f93fbc1e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -93,7 +93,6 @@ private void addPropertiesNotInXml() {
OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY,
OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY,
OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED,
- OMConfigKeys.OZONE_OM_FEATURES_DISABLED,
OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE,
OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY,
OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
index 3063e2587e4..766ed09bccd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
@@ -126,8 +126,6 @@ protected static void init(boolean zeroCopyEnabled) throws Exception {
zeroCopyEnabled);
conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
// "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
index eb3709c9a85..8810bab5190 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -49,6 +48,7 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.TestHelper;
+import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
@@ -96,9 +96,6 @@ static MiniOzoneCluster createCluster() throws IOException,
conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB);
conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 3);
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
-
DatanodeRatisServerConfig ratisServerConfig =
conf.getObject(DatanodeRatisServerConfig.class);
ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
@@ -665,6 +662,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking
@ParameterizedTest
@MethodSource("clientParameters")
+ @Flaky("HDDS-11325")
void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception {
OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking);
try (OzoneClient client = newClient(cluster.getConf(), config)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index 5e5461634c0..f823add57bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -93,6 +93,7 @@ private static Stream<Arguments> clientParameters() {
@ParameterizedTest
@MethodSource("clientParameters")
+ @Flaky("HDDS-11325")
void testContainerClose(boolean flushDelay, boolean enablePiggybacking) throws Exception {
OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking);
try (OzoneClient client = newClient(cluster.getConf(), config)) {
@@ -385,8 +386,7 @@ private void testWriteMoreThanMaxFlushSize(OzoneClient client)
assertInstanceOf(RatisBlockOutputStream.class,
keyOutputStream.getStreamEntries().get(0).getOutputStream());
- assertThat(blockOutputStream.getBufferPool().getSize())
- .isLessThanOrEqualTo(4);
+ assertEquals(4, blockOutputStream.getBufferPool().getSize());
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
assertEquals(400, blockOutputStream.getTotalDataFlushedLength());
@@ -442,8 +442,7 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception {
assertInstanceOf(RatisBlockOutputStream.class,
keyOutputStream.getStreamEntries().get(0).getOutputStream());
- assertThat(blockOutputStream.getBufferPool().getSize())
- .isLessThanOrEqualTo(2);
+ assertEquals(2, blockOutputStream.getBufferPool().getSize());
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
assertEquals(0, blockOutputStream.getTotalDataFlushedLength());
@@ -456,8 +455,7 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception {
// Since the data in the buffer is already flushed, flush here will have
// no impact on the counters and data structures
- assertThat(blockOutputStream.getBufferPool().getSize())
- .isLessThanOrEqualTo(2);
+ assertEquals(2, blockOutputStream.getBufferPool().getSize());
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
@@ -508,10 +506,9 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client)
keyOutputStream.getStreamEntries().get(0).getOutputStream());
// we have just written data more than flush Size(2 chunks), at this time
- // buffer pool will have up to 4 buffers allocated worth of chunk size
+ // buffer pool will have 4 buffers allocated worth of chunk size
- assertThat(blockOutputStream.getBufferPool().getSize())
- .isLessThanOrEqualTo(4);
+ assertEquals(4, blockOutputStream.getBufferPool().getSize());
// writtenDataLength as well flushedDataLength will be updated here
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
@@ -534,8 +531,7 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client)
// Since the data in the buffer is already flushed, flush here will have
// no impact on the counters and data structures
- assertThat(blockOutputStream.getBufferPool().getSize())
- .isLessThanOrEqualTo(4);
+ assertEquals(4, blockOutputStream.getBufferPool().getSize());
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index b2766599ae4..958a37380cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -121,8 +121,6 @@ public static void init() throws Exception {
// constructed.
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
OMConfigKeys.OZONE_BUCKET_LAYOUT_OBJECT_STORE);
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
keyProviderUri);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
index a4327a49bfa..7af0b5f9aa1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
@@ -170,18 +170,6 @@ private static Stream<Arguments> scanTestCases() {
Named.of("Invalid EndKey key9", Arrays.asList("--endkey", "key9")),
Named.of("Expect key1-key5", Pair.of("key1", "key6"))
),
- Arguments.of(
- Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)),
- Named.of("Default", Pair.of(0, "")),
- Named.of("Filter key3", Arrays.asList("--filter", "keyName:equals:key3")),
- Named.of("Expect key3", Pair.of("key3", "key4"))
- ),
- Arguments.of(
- Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)),
- Named.of("Default", Pair.of(0, "")),
- Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")),
- Named.of("Expect key1-key3", null)
- ),
Arguments.of(
Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)),
Named.of("Default", Pair.of(0, "")),
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java
index 29f91821ebd..c24cf748ddb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java
@@ -70,8 +70,6 @@ public class TestLeaseRecoverer {
@BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
- conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
- conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s");
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index 0481ee4a867..4619af1baa2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -338,6 +338,7 @@ public void testKeyOps() throws Exception {
long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics);
long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics);
long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics);
+ long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics);
long initialNumKeys = getLongCounter("NumKeys", omMetrics);
long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics);
@@ -345,6 +346,7 @@ public void testKeyOps() throws Exception {
long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics);
long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics);
long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics);
+ long initialNumTrashKeyListFails = getLongCounter("NumTrashKeyListFails", omMetrics);
long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics);
long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics);
long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics);
@@ -354,15 +356,16 @@ public void testKeyOps() throws Exception {
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName,
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
- doKeyOps(keyArgs); // This will perform 7 different operations on the key
+ doKeyOps(keyArgs);
omMetrics = getMetrics("OMMetrics");
- assertEquals(initialNumKeyOps + 7, getLongCounter("NumKeyOps", omMetrics));
+ assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics));
assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics));
assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics));
assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics));
assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics));
+ assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics));
assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics));
assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics));
@@ -406,6 +409,8 @@ public void testKeyOps() throws Exception {
doThrow(exception).when(mockKm).lookupKey(any(), any(), any());
doThrow(exception).when(mockKm).listKeys(
any(), any(), any(), any(), anyInt());
+ doThrow(exception).when(mockKm).listTrash(
+ any(), any(), any(), any(), anyInt());
OmMetadataReader omMetadataReader =
(OmMetadataReader) ozoneManager.getOmMetadataReader().get();
HddsWhiteboxTestUtils.setInternalState(
@@ -421,17 +426,19 @@ public void testKeyOps() throws Exception {
doKeyOps(keyArgs);
omMetrics = getMetrics("OMMetrics");
- assertEquals(initialNumKeyOps + 28, getLongCounter("NumKeyOps", omMetrics));
+ assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics));
assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics));
assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics));
assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics));
assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics));
+ assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics));
assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics));
assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics));
assertEquals(initialNumKeyLookupFails + 1, getLongCounter("NumKeyLookupFails", omMetrics));
assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics));
assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics));
+ assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics));
assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter(
"NumInitiateMultipartUploadFails", omMetrics));
assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics));
@@ -836,6 +843,12 @@ private void doKeyOps(OmKeyArgs keyArgs) {
} catch (IOException ignored) {
}
+ try {
+ ozoneManager.listTrash(keyArgs.getVolumeName(),
+ keyArgs.getBucketName(), null, null, 0);
+ } catch (IOException ignored) {
+ }
+
try {
writeClient.deleteKey(keyArgs);
} catch (IOException ignored) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index a173bd9222e..abc21ed4351 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -80,7 +80,6 @@
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
@@ -234,7 +233,7 @@ public void setUp() throws IOException {
ozoneManager.getMetadataManager().getMetaTable().put(
OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY, String.valueOf(v));
return null;
- }).when(omRatisServer).submitRequest(any(), any(), anyLong());
+ }).when(omRatisServer).submitRequest(any(), any());
} catch (ServiceException e) {
throw new RuntimeException(e);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 9a6bca29b88..c123675565a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -204,7 +204,11 @@ private void init() throws Exception {
conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths);
conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name());
conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff);
- conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, disableNativeDiff);
+ conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS,
+ disableNativeDiff);
+ conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths);
+ conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name());
+ conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff);
conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST);
// Enable filesystem snapshot feature for the test regardless of the default
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
@@ -1477,8 +1481,10 @@ public void testSnapDiffCancel() throws Exception {
String toSnapshotTableKey =
SnapshotInfo.getTableKey(volumeName, bucketName, toSnapName);
- UUID fromSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshotTableKey).getSnapshotId();
- UUID toSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, toSnapshotTableKey).getSnapshotId();
+ UUID fromSnapshotID = ozoneManager.getOmSnapshotManager()
+ .getSnapshotInfo(fromSnapshotTableKey).getSnapshotId();
+ UUID toSnapshotID = ozoneManager.getOmSnapshotManager()
+ .getSnapshotInfo(toSnapshotTableKey).getSnapshotId();
// Construct SnapshotDiffJob table key.
String snapDiffJobKey = fromSnapshotID + DELIMITER + toSnapshotID;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java
index f178d00daa7..341b5b78c60 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java
@@ -21,12 +21,10 @@
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -36,27 +34,20 @@
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
-import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS;
import static org.apache.ozone.test.LambdaTestUtils.await;
@@ -81,8 +72,6 @@ public class TestOzoneManagerHASnapshot {
public static void staticInit() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
- conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
- conf.setTimeDuration(OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
cluster = MiniOzoneCluster.newHABuilder(conf)
.setOMServiceId("om-service-test")
@@ -276,97 +265,4 @@ private void createFileKey(OzoneBucket bucket, String keyName)
fileKey.write(value);
}
}
-
- /**
- * This is to simulate the HDDS-11152 scenario, in which a follower's doubleBuffer is lagging and accumulates purgeKey
- * and purgeSnapshot in the same batch.
- */
- @Test
- public void testKeyAndSnapshotDeletionService() throws IOException, InterruptedException, TimeoutException {
- OzoneManager omLeader = cluster.getOMLeader();
- OzoneManager omFollower;
-
- if (omLeader != cluster.getOzoneManager(0)) {
- omFollower = cluster.getOzoneManager(0);
- } else {
- omFollower = cluster.getOzoneManager(1);
- }
-
- int numKeys = 5;
- List<String> keys = new ArrayList<>();
- for (int i = 0; i < numKeys; i++) {
- String keyName = "key-" + RandomStringUtils.randomNumeric(10);
- createFileKey(ozoneBucket, keyName);
- keys.add(keyName);
- }
-
- // Stop the key deletion service so that deleted keys get trapped in the snapshots.
- omLeader.getKeyManager().getDeletingService().suspend();
- // Stop the snapshot deletion service so that deleted keys get trapped in the snapshots.
- omLeader.getKeyManager().getSnapshotDeletingService().suspend();
-
- // Delete half of the keys
- for (int i = 0; i < numKeys / 2; i++) {
- ozoneBucket.deleteKey(keys.get(i));
- }
-
- String snapshotName = "snap-" + RandomStringUtils.randomNumeric(10);
- createSnapshot(volumeName, bucketName, snapshotName);
-
- store.deleteSnapshot(volumeName, bucketName, snapshotName);
-
- // Pause double buffer on follower node to accumulate all the key purge, snapshot delete and purge transactions.
- omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer().stopDaemon();
-
- long keyDeleteServiceCount = omLeader.getKeyManager().getDeletingService().getRunCount().get();
- omLeader.getKeyManager().getDeletingService().resume();
-
- GenericTestUtils.waitFor(
- () -> omLeader.getKeyManager().getDeletingService().getRunCount().get() > keyDeleteServiceCount,
- 1000, 60000);
-
- long snapshotDeleteServiceCount = omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get();
- omLeader.getKeyManager().getSnapshotDeletingService().resume();
-
- GenericTestUtils.waitFor(
- () -> omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get() > snapshotDeleteServiceCount,
- 1000, 60000);
-
- String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName);
- checkSnapshotIsPurgedFromDB(omLeader, tableKey);
-
- // Resume the DoubleBuffer and flush the pending transactions.
- OzoneManagerDoubleBuffer omDoubleBuffer =
- omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer();
- omDoubleBuffer.resume();
- CompletableFuture.supplyAsync(() -> {
- omDoubleBuffer.flushTransactions();
- return null;
- });
- omDoubleBuffer.awaitFlush();
- checkSnapshotIsPurgedFromDB(omFollower, tableKey);
- }
-
- private void createSnapshot(String volName, String buckName, String snapName) throws IOException {
- store.createSnapshot(volName, buckName, snapName);
-
- String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName);
- SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey);
- String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo);
- File snapshotDir = new File(fileName);
- if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) {
- throw new IOException("Snapshot directory doesn't exist");
- }
- }
-
- private void checkSnapshotIsPurgedFromDB(OzoneManager ozoneManager, String snapshotTableKey)
- throws InterruptedException, TimeoutException {
- GenericTestUtils.waitFor(() -> {
- try {
- return ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotTableKey) == null;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }, 1000, 60000);
- }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java
similarity index 52%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java
index 254de072e05..be4ea69095b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.ozone.om.snapshot;
-import org.apache.commons.compress.utils.Lists;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -33,26 +32,20 @@
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OmSnapshot;
-import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.SnapshotChainManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
-import org.apache.hadoop.ozone.om.service.DirectoryDeletingService;
-import org.apache.hadoop.ozone.om.service.KeyDeletingService;
import org.apache.hadoop.ozone.om.service.SnapshotDeletingService;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
import org.junit.jupiter.api.Order;
@@ -60,41 +53,25 @@
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Iterator;
import java.util.List;
-import java.util.Map;
import java.util.Objects;
-import java.util.Random;
import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.when;
/**
* Test Snapshot Deleting Service.
@@ -103,10 +80,10 @@
@Timeout(300)
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@TestMethodOrder(OrderAnnotation.class)
-public class TestSnapshotDeletingServiceIntegrationTest {
+public class TestSnapshotDeletingService {
private static final Logger LOG =
- LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class);
+ LoggerFactory.getLogger(TestSnapshotDeletingService.class);
private static boolean omRatisEnabled = true;
private static final ByteBuffer CONTENT =
ByteBuffer.allocate(1024 * 1024 * 16);
@@ -131,7 +108,6 @@ public void setup() throws Exception {
1, StorageUnit.MB);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL,
500, TimeUnit.MILLISECONDS);
- conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT,
10000, TimeUnit.MILLISECONDS);
conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500);
@@ -171,7 +147,7 @@ public void testSnapshotSplitAndMove() throws Exception {
Table<String, SnapshotInfo> snapshotInfoTable =
om.getMetadataManager().getSnapshotInfoTable();
- createSnapshotDataForBucket(bucket1);
+ createSnapshotDataForBucket1();
assertTableRowCount(snapshotInfoTable, 2);
GenericTestUtils.waitFor(() -> snapshotDeletingService
@@ -198,7 +174,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception {
om.getMetadataManager().getSnapshotInfoTable();
runIndividualTest = false;
- createSnapshotDataForBucket(bucket1);
+ createSnapshotDataForBucket1();
BucketArgs bucketArgs = new BucketArgs.Builder()
.setBucketLayout(BucketLayout.LEGACY)
@@ -449,7 +425,7 @@ public void testSnapshotWithFSO() throws Exception {
while (iterator.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> next = iterator.next();
String activeDBDeletedKey = next.getKey();
- if (activeDBDeletedKey.matches(".*/key1/.*")) {
+ if (activeDBDeletedKey.matches(".*/key1.*")) {
RepeatedOmKeyInfo activeDBDeleted = next.getValue();
OMMetadataManager metadataManager =
cluster.getOzoneManager().getMetadataManager();
@@ -478,228 +454,6 @@ public void testSnapshotWithFSO() throws Exception {
rcSnap1.close();
}
- private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted,
- AtomicBoolean dirDeletionStarted)
- throws InterruptedException, TimeoutException {
- OzoneManager ozoneManager = Mockito.spy(om);
- om.getKeyManager().getDirDeletingService().shutdown();
- GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000,
- 100000);
- DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000,
- TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf()));
- directoryDeletingService.shutdown();
- GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000,
- 100000);
- when(ozoneManager.getMetadataManager()).thenAnswer(i -> {
- // Wait for SDS to reach DDS wait block before processing any deleted directories.
- GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000);
- dirDeletionStarted.set(true);
- return i.callRealMethod();
- });
- return directoryDeletingService;
- }
-
- private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletionWaitStarted,
- AtomicBoolean keyDeletionStarted)
- throws InterruptedException, TimeoutException, IOException {
- OzoneManager ozoneManager = Mockito.spy(om);
- om.getKeyManager().getDeletingService().shutdown();
- GenericTestUtils.waitFor(() -> om.getKeyManager().getDeletingService().getThreadCount() == 0, 1000,
- 100000);
- KeyManager keyManager = Mockito.spy(om.getKeyManager());
- when(ozoneManager.getKeyManager()).thenReturn(keyManager);
- KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager,
- ozoneManager.getScmClient().getBlockClient(), keyManager, 10000,
- 100000, cluster.getConf(), false));
- keyDeletingService.shutdown();
- GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000,
- 100000);
- when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> {
- // wait for SDS to reach the KDS wait block before processing any key.
- GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000);
- keyDeletionStarted.set(true);
- return i.callRealMethod();
- });
- return keyDeletingService;
- }
-
- @SuppressWarnings("checkstyle:parameternumber")
- private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingService keyDeletingService,
- DirectoryDeletingService directoryDeletingService,
- AtomicBoolean snapshotDeletionStarted,
- AtomicBoolean keyDeletionWaitStarted,
- AtomicBoolean dirDeletionWaitStarted,
- AtomicBoolean keyDeletionStarted,
- AtomicBoolean dirDeletionStarted,
- OzoneBucket testBucket)
- throws InterruptedException, TimeoutException, IOException {
- OzoneManager ozoneManager = Mockito.spy(om);
- om.getKeyManager().getSnapshotDeletingService().shutdown();
- GenericTestUtils.waitFor(() -> om.getKeyManager().getSnapshotDeletingService().getThreadCount() == 0, 1000,
- 100000);
- KeyManager keyManager = Mockito.spy(om.getKeyManager());
- OmMetadataManagerImpl omMetadataManager = Mockito.spy((OmMetadataManagerImpl)om.getMetadataManager());
- SnapshotChainManager unMockedSnapshotChainManager =
- ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager();
- SnapshotChainManager snapshotChainManager = Mockito.spy(unMockedSnapshotChainManager);
- OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager());
- when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager);
- when(ozoneManager.getKeyManager()).thenReturn(keyManager);
- when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
- when(omMetadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager);
- when(keyManager.getDeletingService()).thenReturn(keyDeletingService);
- when(keyManager.getDirDeletingService()).thenReturn(directoryDeletingService);
- SnapshotDeletingService snapshotDeletingService = Mockito.spy(new SnapshotDeletingService(10000,
- 100000, ozoneManager));
- snapshotDeletingService.shutdown();
- GenericTestUtils.waitFor(() -> snapshotDeletingService.getThreadCount() == 0, 1000,
- 100000);
- when(snapshotChainManager.iterator(anyBoolean())).thenAnswer(i -> {
- Iterator<UUID> itr = (Iterator<UUID>) i.callRealMethod();
- return Lists.newArrayList(itr).stream().filter(uuid -> {
- try {
- SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(om, snapshotChainManager, uuid);
- return snapshotInfo.getBucketName().equals(testBucket.getName()) &&
- snapshotInfo.getVolumeName().equals(testBucket.getVolumeName());
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }).iterator();
- });
- when(snapshotChainManager.getLatestGlobalSnapshotId())
- .thenAnswer(i -> unMockedSnapshotChainManager.getLatestGlobalSnapshotId());
- when(snapshotChainManager.getOldestGlobalSnapshotId())
- .thenAnswer(i -> unMockedSnapshotChainManager.getOldestGlobalSnapshotId());
- doAnswer(i -> {
- // KDS wait block reached in SDS.
- GenericTestUtils.waitFor(() -> {
- return keyDeletingService.isRunningOnAOS();
- }, 1000, 100000);
- keyDeletionWaitStarted.set(true);
- return i.callRealMethod();
- }).when(snapshotDeletingService).waitForKeyDeletingService();
- doAnswer(i -> {
- // DDS wait block reached in SDS.
- GenericTestUtils.waitFor(directoryDeletingService::isRunningOnAOS, 1000, 100000);
- dirDeletionWaitStarted.set(true);
- return i.callRealMethod();
- }).when(snapshotDeletingService).waitForDirDeletingService();
- doAnswer(i -> {
- // Assert KDS & DDS are not running when SDS starts moving entries, and assert that all wait blocks, the KDS
- // AOS-processing block & the DDS AOS block have been executed.
- Assertions.assertTrue(keyDeletionWaitStarted.get());
- Assertions.assertTrue(dirDeletionWaitStarted.get());
- Assertions.assertTrue(keyDeletionStarted.get());
- Assertions.assertTrue(dirDeletionStarted.get());
- Assertions.assertFalse(keyDeletingService.isRunningOnAOS());
- Assertions.assertFalse(directoryDeletingService.isRunningOnAOS());
- snapshotDeletionStarted.set(true);
- return i.callRealMethod();
- }).when(omSnapshotManager).getSnapshot(anyString(), anyString(), anyString());
- return snapshotDeletingService;
- }
-
- @Test
- @Order(4)
- public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception {
- AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false);
- AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false);
- AtomicBoolean keyDeletionStarted = new AtomicBoolean(false);
- AtomicBoolean dirDeletionStarted = new AtomicBoolean(false);
- AtomicBoolean snapshotDeletionStarted = new AtomicBoolean(false);
- Random random = new Random();
- String bucketName = "bucket" + random.nextInt();
- BucketArgs bucketArgs = new BucketArgs.Builder()
- .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
- .build();
- OzoneBucket testBucket = TestDataUtil.createBucket(
- client, VOLUME_NAME, bucketArgs, bucketName);
- // mock keyDeletingService
- KeyDeletingService keyDeletingService = getMockedKeyDeletingService(keyDeletionWaitStarted, keyDeletionStarted);
-
- // mock dirDeletingService
- DirectoryDeletingService directoryDeletingService = getMockedDirectoryDeletingService(dirDeletionWaitStarted,
- dirDeletionStarted);
-
- // mock snapshotDeletingService.
- SnapshotDeletingService snapshotDeletingService = getMockedSnapshotDeletingService(keyDeletingService,
- directoryDeletingService, snapshotDeletionStarted, keyDeletionWaitStarted, dirDeletionWaitStarted,
- keyDeletionStarted, dirDeletionStarted, testBucket);
- createSnapshotFSODataForBucket(testBucket);
- List<Table.KeyValue<String, String>> renamesKeyEntries;
- List<Table.KeyValue<String, List<OmKeyInfo>>> deletedKeyEntries;
- List<Table.KeyValue<String, OmKeyInfo>> deletedDirEntries;
- try (ReferenceCounted<OmSnapshot> snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(),
- testBucket.getName(), testBucket.getName() + "snap2")) {
- renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000);
- deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000);
- deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
- testBucket.getName(), 1000);
- }
- Thread keyDeletingThread = new Thread(() -> {
- try {
- keyDeletingService.runPeriodicalTaskNow();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
- Thread directoryDeletingThread = new Thread(() -> {
- try {
- directoryDeletingService.runPeriodicalTaskNow();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
- ExecutorService snapshotDeletingThread = Executors.newFixedThreadPool(1);
- Runnable snapshotDeletionRunnable = () -> {
- try {
- snapshotDeletingService.runPeriodicalTaskNow();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- };
- keyDeletingThread.start();
- directoryDeletingThread.start();
- Future<?> future = snapshotDeletingThread.submit(snapshotDeletionRunnable);
- GenericTestUtils.waitFor(snapshotDeletionStarted::get, 1000, 30000);
- future.get();
- try (ReferenceCounted<OmSnapshot> snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(),
- testBucket.getName(), testBucket.getName() + "snap2")) {
- Assertions.assertEquals(Collections.emptyList(),
- snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000));
- Assertions.assertEquals(Collections.emptyList(),
- snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000));
- Assertions.assertEquals(Collections.emptyList(),
- snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
- testBucket.getName(), 1000));
- }
- List<Table.KeyValue<String, String>> aosRenamesKeyEntries =
- om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000);
- List<Table.KeyValue<String, List<OmKeyInfo>>> aosDeletedKeyEntries =
- om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
- testBucket.getName(), "", 1000);
- List<Table.KeyValue<String, OmKeyInfo>> aosDeletedDirEntries =
- om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
- testBucket.getName(), 1000);
- renamesKeyEntries.forEach(entry -> Assertions.assertTrue(aosRenamesKeyEntries.contains(entry)));
- deletedKeyEntries.forEach(entry -> Assertions.assertTrue(aosDeletedKeyEntries.contains(entry)));
- deletedDirEntries.forEach(entry -> Assertions.assertTrue(aosDeletedDirEntries.contains(entry)));
- Mockito.reset(snapshotDeletingService);
- SnapshotInfo snap2 = SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(),
- testBucket.getName(), testBucket.getName() + "snap2");
- Assertions.assertEquals(snap2.getSnapshotStatus(), SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED);
- future = snapshotDeletingThread.submit(snapshotDeletionRunnable);
- future.get();
- Assertions.assertThrows(IOException.class, () -> SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(),
- testBucket.getName(), testBucket.getName() + "snap2"));
- cluster.restartOzoneManager();
- }
-
/*
Flow
----
@@ -718,7 +472,7 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce
create snapshot3
delete snapshot2
*/
- private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws Exception {
+ private void createSnapshotDataForBucket1() throws Exception {
Table<String, SnapshotInfo> snapshotInfoTable =
om.getMetadataManager().getSnapshotInfoTable();
Table<String, RepeatedOmKeyInfo> deletedTable =
@@ -728,147 +482,70 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
om.getMetadataManager();
- TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
- TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key1", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
assertTableRowCount(keyTable, 2);
// Create Snapshot 1.
- client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "snap1");
+ client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1snap1");
assertTableRowCount(snapshotInfoTable, 1);
// Overwrite bucket1key0. This is a newer version of the key, which should
// be reclaimed as it is a different version of the key.
- TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
- TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key2", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
// Key 1 cannot be reclaimed as it is still referenced by Snapshot 1.
- client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "key1", false);
+ client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1key1", false);
// Key 2 is deleted here and will be reclaimed, as it is
// not referenced by the previous snapshot.
- client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "key2", false);
- client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "key0", false);
+ client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1key2", false);
+ client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1key0", false);
assertTableRowCount(keyTable, 0);
// One copy of bucket1key0 should also be reclaimed as it is not the same,
// but the original deleted key created during the overwrite should not be deleted.
assertTableRowCount(deletedTable, 2);
// Create Snapshot 2.
- client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "snap2");
+ client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1snap2");
assertTableRowCount(snapshotInfoTable, 2);
// Key 2 is removed from the active Db's
// deletedTable when Snapshot 2 is taken.
assertTableRowCount(deletedTable, 0);
- TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key3", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
- TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE,
+ TestDataUtil.createKey(bucket1, "bucket1key4", ReplicationFactor.THREE,
ReplicationType.RATIS, CONTENT);
- client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "key4", false);
+ client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1key4", false);
assertTableRowCount(keyTable, 1);
assertTableRowCount(deletedTable, 0);
// Create Snapshot 3.
- client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "snap3");
+ client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1snap3");
assertTableRowCount(snapshotInfoTable, 3);
SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable()
- .get(String.format("/%s/%s/%ssnap2", bucket.getVolumeName(), bucket.getName(), bucket.getName()));
+ .get("/vol1/bucket1/bucket1snap2");
// Delete Snapshot 2.
- client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(),
- bucket.getName() + "snap2");
+ client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE,
+ "bucket1snap2");
assertTableRowCount(snapshotInfoTable, 2);
- verifySnapshotChain(snapshotInfo, String.format("/%s/%s/%ssnap3", bucket.getVolumeName(), bucket.getName(),
- bucket.getName()));
- }
-
-
- /*
- Flow
- ----
- create dir0/key0
- create dir1/key1
- overwrite dir0/key0
- create dir2/key2
- create snap1
- rename dir1/key1 -> dir1/key10
- delete dir1/key10
- delete dir2
- create snap2
- delete snap2
- */
- private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws Exception {
- Table<String, SnapshotInfo> snapshotInfoTable =
- om.getMetadataManager().getSnapshotInfoTable();
- Table<String, RepeatedOmKeyInfo> deletedTable =
- om.getMetadataManager().getDeletedTable();
- Table<String, OmKeyInfo> deletedDirTable =
- om.getMetadataManager().getDeletedDirTable();
- Table<String, OmKeyInfo> keyTable =
- om.getMetadataManager().getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
- Table<String, OmDirectoryInfo> dirTable =
- om.getMetadataManager().getDirectoryTable();
- Table<String, String> renameTable = om.getMetadataManager().getSnapshotRenamedTable();
- OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
- om.getMetadataManager();
- Map