- List<String> pieces = Lists.newArrayList();
- for (String ele : elements) {
- if (ele.equals(""))
- continue;
- pieces.add(ele);
- }
- assert pieces.size() >= 4 : "Too few elements in path " + remoteFilePath;
- baseDir = pieces.get(0);
- region = pieces.get(1);
- clusterName = pieces.get(2);
- token = pieces.get(3);
- }
-
- @Override
- public String remotePrefix(Date start, Date end, String location) {
- StringBuffer buff = new StringBuffer(clusterPrefix(location));
- token = factory.getInstance().getToken();
- buff.append(token).append(S3BackupPath.PATH_SEP);
- // match the common characters to prefix.
- buff.append(match(start, end));
- return buff.toString();
- }
-
- @Override
- public String clusterPrefix(String location) {
- StringBuffer buff = new StringBuffer();
- String[] elements = location.split(String.valueOf(S3BackupPath.PATH_SEP));
- if (elements.length <= 1) {
- baseDir = config.getBackupLocation();
- region = config.getDC();
- clusterName = config.getAppName();
- } else {
- assert elements.length >= 4 : "Too few elements in path " + location;
- baseDir = elements[1];
- region = elements[2];
- clusterName = elements[3];
- }
- buff.append(baseDir).append(S3BackupPath.PATH_SEP);
- buff.append(region).append(S3BackupPath.PATH_SEP);
- buff.append(clusterName).append(S3BackupPath.PATH_SEP);
-
- return buff.toString();
- }
-
-}
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java
index af058ae95..0676b859a 100755
--- a/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws;
@@ -20,38 +18,43 @@
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
-import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.aws.auth.IS3Credential;
import com.netflix.priam.backup.IBackupFileSystem;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.identity.config.InstanceInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* A version of S3FileSystem which allows it api access across different AWS accounts.
- *
+ *
* *Note: ideally, this object should extend S3FileSystem but could not be done because:
* - S3FileSystem is a singleton and it uses DI. To follow the DI pattern, the best way to get this singleton is via injection.
- * - S3FileSystem registers a MBean to JMX which must be only once per JVM. If not, you get
+ * - S3FileSystem registers a MBean to JMX which must be only once per JVM. If not, you get
* java.lang.RuntimeException: javax.management.InstanceAlreadyExistsException: com.priam.aws.S3FileSystemMBean:name=S3FileSystemMBean
- * -
+ * -
*/
@Singleton
public class S3CrossAccountFileSystem {
private static final Logger logger = LoggerFactory.getLogger(S3CrossAccountFileSystem.class);
private AmazonS3 s3Client;
- private S3FileSystem s3fs;
- private IConfiguration config;
- private IS3Credential s3Credential;
+ private final S3FileSystem s3fs;
+ private final IConfiguration config;
+ private final IS3Credential s3Credential;
+ private final InstanceInfo instanceInfo;
@Inject
- public S3CrossAccountFileSystem(@Named("backup") IBackupFileSystem fs, @Named("awss3roleassumption") IS3Credential s3Credential, IConfiguration config) {
-
+ public S3CrossAccountFileSystem(
+ @Named("backup") IBackupFileSystem fs,
+ @Named("awss3roleassumption") IS3Credential s3Credential,
+ IConfiguration config,
+ InstanceInfo instanceInfo) {
this.s3fs = (S3FileSystem) fs;
this.config = config;
this.s3Credential = s3Credential;
-
+ this.instanceInfo = instanceInfo;
}
public IBackupFileSystem getBackupFileSystem() {
@@ -62,29 +65,30 @@ public AmazonS3 getCrossAcctS3Client() {
if (this.s3Client == null) {
synchronized (this) {
-
if (this.s3Client == null) {
try {
- this.s3Client = AmazonS3Client.builder().withCredentials(s3Credential.getAwsCredentialProvider()).withRegion(config.getDC()).build();
+ this.s3Client =
+ AmazonS3Client.builder()
+ .withCredentials(s3Credential.getAwsCredentialProvider())
+ .withRegion(instanceInfo.getRegion())
+ .build();
} catch (Exception e) {
- throw new IllegalStateException("Exception in getting handle to s3 client. Msg: " + e.getLocalizedMessage(), e);
-
+ throw new IllegalStateException(
+ "Exception in getting handle to s3 client. Msg: "
+ + e.getLocalizedMessage(),
+ e);
}
- //Lets leverage the IBackupFileSystem behaviors except we want it to use our amazon S3 client which has cross AWS account api capability.
+ // Lets leverage the IBackupFileSystem behaviors except we want it to use our
+ // amazon S3 client which has cross AWS account api capability.
this.s3fs.setS3Client(s3Client);
-
}
-
}
-
}
-
return this.s3Client;
}
-
-}
\ No newline at end of file
+}
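
getCrossAcctS3Client() above lazily builds the cross-account AmazonS3 client behind double-checked locking so that only one client is constructed per JVM and then handed to the shared S3FileSystem via setS3Client(). Below is a minimal sketch of that initialization idiom on its own, independent of Priam's credential and configuration types; the region string and the AmazonS3ClientBuilder.standard() call are illustrative stand-ins, and the field is marked volatile as the textbook form of the pattern requires.

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;

    final class LazyS3Client {
        // volatile so a fully constructed client is visible to every thread
        private volatile AmazonS3 s3Client;

        AmazonS3 get() {
            if (s3Client == null) { // fast path: no locking once initialized
                synchronized (this) {
                    if (s3Client == null) { // re-check under the lock
                        s3Client =
                                AmazonS3ClientBuilder.standard()
                                        .withRegion("us-east-1") // placeholder region
                                        .build();
                    }
                }
            }
            return s3Client;
        }
    }
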
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java
index f36a2d3a1..eeeaa2ca4 100755
--- a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws;
@@ -25,187 +23,174 @@
import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
-import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.cred.ICredential;
import com.netflix.priam.backup.AbstractBackupPath;
import com.netflix.priam.backup.BackupRestoreException;
+import com.netflix.priam.backup.DynamicRateLimiter;
import com.netflix.priam.backup.RangeReadInputStream;
+import com.netflix.priam.compress.ChunkedStream;
import com.netflix.priam.compress.ICompression;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.cred.ICredential;
import com.netflix.priam.cryptography.IFileCryptography;
+import com.netflix.priam.identity.config.InstanceInfo;
import com.netflix.priam.merics.BackupMetrics;
import com.netflix.priam.notification.BackupNotificationMgr;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
import java.io.*;
-import java.lang.management.ManagementFactory;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
import java.util.Iterator;
import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-/**
- * Implementation of IBackupFileSystem for S3. The upload/download will work with ciphertext.
- */
+/** Implementation of IBackupFileSystem for S3. The upload/download will work with ciphertext. */
@Singleton
-public class S3EncryptedFileSystem extends S3FileSystemBase implements S3EncryptedFileSystemMBean {
+public class S3EncryptedFileSystem extends S3FileSystemBase {
private static final Logger logger = LoggerFactory.getLogger(S3EncryptedFileSystem.class);
- private AtomicInteger uploadCount = new AtomicInteger();
- private IFileCryptography encryptor;
+ private final IFileCryptography encryptor;
+ private final DynamicRateLimiter dynamicRateLimiter;
@Inject
- public S3EncryptedFileSystem(Provider<AbstractBackupPath> pathProvider, ICompression compress, final IConfiguration config, ICredential cred
- , @Named("filecryptoalgorithm") IFileCryptography fileCryptography
- , BackupMetrics backupMetrics,
- BackupNotificationMgr backupNotificationMgr
- ) {
+ public S3EncryptedFileSystem(
+ Provider<AbstractBackupPath> pathProvider,
+ ICompression compress,
+ final IConfiguration config,
+ ICredential cred,
+ @Named("filecryptoalgorithm") IFileCryptography fileCryptography,
+ BackupMetrics backupMetrics,
+ BackupNotificationMgr backupNotificationMgr,
+ InstanceInfo instanceInfo,
+ DynamicRateLimiter dynamicRateLimiter) {
super(pathProvider, compress, config, backupMetrics, backupNotificationMgr);
this.encryptor = fileCryptography;
-
- MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- String mbeanName = ENCRYPTED_FILE_SYSTEM_MBEAN_NAME;
- try {
- mbs.registerMBean(this, new ObjectName(mbeanName));
- } catch (Exception e) {
- throw new RuntimeException("Unable to regiser JMX bean: " + mbeanName + " to JMX server. Msg: " + e.getLocalizedMessage(), e);
- }
-
- super.s3Client = AmazonS3Client.builder().withCredentials(cred.getAwsCredentialProvider()).withRegion(config.getDC()).build();
+ this.dynamicRateLimiter = dynamicRateLimiter;
+ super.s3Client =
+ AmazonS3Client.builder()
+ .withCredentials(cred.getAwsCredentialProvider())
+ .withRegion(instanceInfo.getRegion())
+ .build();
}
@Override
- /*
- Note: provides same information as getBytesUploaded() but it's meant for S3FileSystemMBean object types.
- */
- public long bytesUploaded() {
- return bytesUploaded.get();
- }
-
-
- @Override
- public long bytesDownloaded() {
- return bytesDownloaded.get();
- }
-
- @Override
- public void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException {
- try {
-
- RangeReadInputStream rris = new RangeReadInputStream(s3Client, getPrefix(config), path);
-
- /*
+ protected void downloadFileImpl(AbstractBackupPath path, String suffix)
+ throws BackupRestoreException {
+ String remotePath = path.getRemotePath();
+ Path localPath = Paths.get(path.newRestoreFile().getAbsolutePath() + suffix);
+ try (OutputStream os = new FileOutputStream(localPath.toFile());
+ RangeReadInputStream rris =
+ new RangeReadInputStream(
+ s3Client, getShard(), super.getFileSize(remotePath), remotePath)) {
+ /*
* To handle use cases where decompression should be done outside of the download. For example, the file have been compressed and then encrypted.
- * Hence, decompressing it here would compromise the decryption.
- */
- try {
- IOUtils.copyLarge(rris, os);
-
- } catch (Exception ex) {
-
- throw new BackupRestoreException("Exception encountered when copying bytes from input to output during download", ex);
-
- } finally {
- IOUtils.closeQuietly(rris);
- IOUtils.closeQuietly(os);
- }
-
+ * Hence, decompressing it here would compromise the decryption.
+ */
+ IOUtils.copyLarge(rris, os);
} catch (Exception e) {
- throw new BackupRestoreException("Exception encountered downloading " + path.getRemotePath() + " from S3 bucket " + getPrefix(config)
- + ", Msg: " + e.getMessage(), e);
+ throw new BackupRestoreException(
+ "Exception encountered downloading "
+ + remotePath
+ + " from S3 bucket "
+ + getShard()
+ + ", Msg: "
+ + e.getMessage(),
+ e);
}
}
-
@Override
- public void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException {
-
- InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(), path.getRemotePath()); //initialize chunking request to aws
- InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); //Fetch the aws generated upload id for this chunking request
- DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
- List<PartETag> partETags = Lists.newArrayList(); //Metadata on number of parts to be uploaded
-
-
- //== Read chunks from src, compress it, and write to temp file
- String compressedFileName = path.newRestoreFile() + ".compressed";
- logger.debug("Compressing {} with chunk size {}", compressedFileName, chunkSize);
- File compressedDstFile = null;
- FileOutputStream compressedDstFileOs = null;
- BufferedOutputStream compressedBos = null;
- try {
-
- compressedDstFile = new File(compressedFileName);
- compressedDstFileOs = new FileOutputStream(compressedDstFile);
- compressedBos = new BufferedOutputStream(compressedDstFileOs);
-
- } catch (FileNotFoundException e) {
- throw new BackupRestoreException("Not able to find temporary compressed file: " + compressedFileName);
- }
-
- try {
-
- Iterator<byte[]> compressedChunks = this.compress.compress(in, chunkSize);
+ protected long uploadFileImpl(AbstractBackupPath path, Instant target)
+ throws BackupRestoreException {
+ Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
+ String remotePath = path.getRemotePath();
+
+ long chunkSize = getChunkSize(localPath);
+ // initialize chunking request to aws
+ InitiateMultipartUploadRequest initRequest =
+ new InitiateMultipartUploadRequest(config.getBackupPrefix(), remotePath);
+ // Fetch the aws generated upload id for this chunking request
+ InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
+ DataPart part =
+ new DataPart(config.getBackupPrefix(), remotePath, initResponse.getUploadId());
+ // Metadata on number of parts to be uploaded
+ List<PartETag> partETags = Lists.newArrayList();
+
+ // Read chunks from src, compress it, and write to temp file
+ File compressedDstFile = new File(localPath.toString() + ".compressed");
+ if (logger.isDebugEnabled())
+ logger.debug(
+ "Compressing {} with chunk size {}",
+ compressedDstFile.getAbsolutePath(),
+ chunkSize);
+
+ try (InputStream in = new FileInputStream(localPath.toFile());
+ BufferedOutputStream compressedBos =
+ new BufferedOutputStream(new FileOutputStream(compressedDstFile))) {
+ Iterator<byte[]> compressedChunks =
+ new ChunkedStream(in, chunkSize, path.getCompression());
while (compressedChunks.hasNext()) {
byte[] compressedChunk = compressedChunks.next();
compressedBos.write(compressedChunk);
}
-
- } catch (IOException e) {
- String message = String.format("Exception in compressing the input data during upload to EncryptedStore Msg: " + e.getMessage());
+ } catch (Exception e) {
+ String message =
+ "Exception in compressing the input data during upload to EncryptedStore Msg: "
+ + e.getMessage();
logger.error(message, e);
throw new BackupRestoreException(message);
- } finally {
- IOUtils.closeQuietly(in);
- IOUtils.closeQuietly(compressedBos);
}
- //== Read compressed data, encrypt each chunk, upload it to aws
- FileInputStream compressedFileIs = null;
- BufferedInputStream compressedBis = null;
- try {
+ // == Read compressed data, encrypt each chunk, upload it to aws
+ try (BufferedInputStream compressedBis =
+ new BufferedInputStream(new FileInputStream(compressedDstFile))) {
+ Iterator<byte[]> chunks = this.encryptor.encryptStream(compressedBis, remotePath);
- compressedFileIs = new FileInputStream(new File(compressedFileName));
- compressedBis = new BufferedInputStream(compressedFileIs);
- Iterator<byte[]> chunks = this.encryptor.encryptStream(compressedBis, path.getRemotePath());
+ // identifies this part position in the object we are uploading
+ int partNum = 0;
+ long encryptedFileSize = 0;
- int partNum = 0; //identifies this part position in the object we are uploading
while (chunks.hasNext()) {
byte[] chunk = chunks.next();
- rateLimiter.acquire(chunk.length); //throttle upload to endpoint
-
- DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
+ // throttle upload to endpoint
+ rateLimiter.acquire(chunk.length);
+ dynamicRateLimiter.acquire(path, target, chunk.length);
+
+ DataPart dp =
+ new DataPart(
+ ++partNum,
+ chunk,
+ config.getBackupPrefix(),
+ remotePath,
+ initResponse.getUploadId());
S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
+ encryptedFileSize += chunk.length;
executor.submit(partUploader);
-
- bytesUploaded.addAndGet(chunk.length);
}
executor.sleepTillEmpty();
if (partNum != partETags.size()) {
- throw new BackupRestoreException("Number of parts(" + partNum + ") does not match the expected number of uploaded parts(" + partETags.size() + ")");
+ throw new BackupRestoreException(
+ "Number of parts("
+ + partNum
+ + ") does not match the expected number of uploaded parts("
+ + partETags.size()
+ + ")");
}
- CompleteMultipartUploadResult resultS3MultiPartUploadComplete = new S3PartUploader(s3Client, part, partETags).completeUpload(); //complete the aws chunking upload by providing to aws the ETag that uniquely identifies the combined object data
- checkSuccessfulUpload(resultS3MultiPartUploadComplete, path);
-
+ // complete the aws chunking upload by providing to aws the ETag that uniquely
+ // identifies the combined object data
+ CompleteMultipartUploadResult resultS3MultiPartUploadComplete =
+ new S3PartUploader(s3Client, part, partETags).completeUpload();
+ checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath);
+ return encryptedFileSize;
} catch (Exception e) {
- throw encounterError(path, new S3PartUploader(s3Client, part, partETags), e);
+ new S3PartUploader(s3Client, part, partETags).abortUpload();
+ throw new BackupRestoreException("Error uploading file: " + localPath, e);
} finally {
- IOUtils.closeQuietly(compressedBis);
- if (compressedDstFile.exists())
- compressedDstFile.delete();
+ if (compressedDstFile.exists()) compressedDstFile.delete();
}
-
}
-
-
- @Override
- public int getActivecount() {
- return executor.getActiveCount();
- }
-
-
-}
\ No newline at end of file
+}
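
The comment kept inside downloadFileImpl() explains why the encrypted store copies raw bytes instead of decompressing during download: backups are compressed first and then encrypted, so a restore has to decrypt before it can decompress. The following is a self-contained sketch of that ordering, using GZIP and AES/CBC from the JDK purely as stand-ins for Priam's ICompression and IFileCryptography implementations.

    import java.io.*;
    import java.nio.charset.StandardCharsets;
    import java.security.SecureRandom;
    import java.util.zip.GZIPInputStream;
    import java.util.zip.GZIPOutputStream;
    import javax.crypto.Cipher;
    import javax.crypto.CipherInputStream;
    import javax.crypto.CipherOutputStream;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.IvParameterSpec;

    final class CompressThenEncryptSketch {
        public static void main(String[] args) throws Exception {
            SecretKey key = KeyGenerator.getInstance("AES").generateKey();
            byte[] iv = new byte[16];
            new SecureRandom().nextBytes(iv);

            // Backup side: bytes are gzipped first, then the gzip output is encrypted.
            Cipher enc = Cipher.getInstance("AES/CBC/PKCS5Padding");
            enc.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
            ByteArrayOutputStream stored = new ByteArrayOutputStream();
            try (OutputStream out = new GZIPOutputStream(new CipherOutputStream(stored, enc))) {
                out.write("sstable bytes".getBytes(StandardCharsets.UTF_8));
            }

            // Restore side: decrypt the downloaded bytes first, only then decompress.
            Cipher dec = Cipher.getInstance("AES/CBC/PKCS5Padding");
            dec.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
            try (InputStream in =
                            new GZIPInputStream(
                                    new CipherInputStream(
                                            new ByteArrayInputStream(stored.toByteArray()), dec));
                    ByteArrayOutputStream plain = new ByteArrayOutputStream()) {
                byte[] buf = new byte[4096];
                for (int n; (n = in.read(buf)) != -1; ) plain.write(buf, 0, n);
                System.out.println(plain.toString(StandardCharsets.UTF_8.name()));
            }
        }
    }
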
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java
deleted file mode 100755
index a60a61103..000000000
--- a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.aws;
-
-public interface S3EncryptedFileSystemMBean {
-
- String ENCRYPTED_FILE_SYSTEM_MBEAN_NAME = "com.priam.aws.S3EncryptedFileSystemMBean:name=S3EncryptedFileSystemMBean";
-
- long downloadCount();
-
- long uploadCount();
-
- int getActivecount();
-
- long bytesUploaded();
-
- long bytesDownloaded();
-}
\ No newline at end of file
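
This MBean interface disappears along with the registration calls removed from the file system classes; as the S3CrossAccountFileSystem comment notes, registering the same JMX ObjectName twice in one JVM fails with InstanceAlreadyExistsException. A tiny, hypothetical illustration of that constraint (class and object names here are made up, not Priam's):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class JmxOnceDemo {
        public interface DemoMBean {
            long getCount();
        }

        public static class Demo implements DemoMBean {
            @Override
            public long getCount() {
                return 0;
            }
        }

        public static void main(String[] args) throws Exception {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = new ObjectName("com.example:name=Demo");
            mbs.registerMBean(new Demo(), name); // first registration succeeds
            mbs.registerMBean(new Demo(), name); // throws InstanceAlreadyExistsException
        }
    }
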
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java
index 42ff0cb7f..ce597c13e 100644
--- a/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java
@@ -1,189 +1,227 @@
-/*
- * Copyright 2013 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package com.netflix.priam.aws;
-
-import com.amazonaws.services.s3.AmazonS3Client;
-import com.amazonaws.services.s3.S3ResponseMetadata;
-import com.amazonaws.services.s3.model.*;
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.Singleton;
-import com.google.inject.name.Named;
-import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.aws.auth.IS3Credential;
-import com.netflix.priam.backup.AbstractBackupPath;
-import com.netflix.priam.backup.BackupRestoreException;
-import com.netflix.priam.backup.RangeReadInputStream;
-import com.netflix.priam.compress.ICompression;
-import com.netflix.priam.merics.BackupMetrics;
-import com.netflix.priam.notification.BackupNotificationMgr;
-import com.netflix.priam.utils.BoundedExponentialRetryCallable;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import java.io.*;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Implementation of IBackupFileSystem for S3
- */
-@Singleton
-public class S3FileSystem extends S3FileSystemBase implements S3FileSystemMBean {
- private static final Logger logger = LoggerFactory.getLogger(S3FileSystem.class);
-
- @Inject
- public S3FileSystem(@Named("awss3roleassumption") IS3Credential cred, Provider<AbstractBackupPath> pathProvider,
- ICompression compress,
- final IConfiguration config,
- BackupMetrics backupMetrics,
- BackupNotificationMgr backupNotificationMgr) {
- super(pathProvider, compress, config, backupMetrics, backupNotificationMgr);
-
- MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- try {
- mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
-
- s3Client = AmazonS3Client.builder().withCredentials(cred.getAwsCredentialProvider()).withRegion(config.getDC()).build();
- }
-
- @Override
- public void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException {
- try {
- RangeReadInputStream rris = new RangeReadInputStream(s3Client, getPrefix(this.config), path);
- final long bufSize = MAX_BUFFERED_IN_STREAM_SIZE > path.getSize() ? path.getSize() : MAX_BUFFERED_IN_STREAM_SIZE;
- compress.decompressAndClose(new BufferedInputStream(rris, (int) bufSize), os);
- } catch (Exception e) {
- throw new BackupRestoreException("Exception encountered downloading " + path.getRemotePath() + " from S3 bucket " + getPrefix(config)
- + ", Msg: " + e.getMessage(), e);
- }
- }
-
- private void uploadMultipart(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException {
- InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(), path.getRemotePath());
- InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
- DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
- List<PartETag> partETags = Collections.synchronizedList(new ArrayList<PartETag>());
-
- try {
- Iterator<byte[]> chunks = compress.compress(in, chunkSize);
- // Upload parts.
- int partNum = 0;
- AtomicInteger partsUploaded = new AtomicInteger(0);
-
- while (chunks.hasNext()) {
- byte[] chunk = chunks.next();
- rateLimiter.acquire(chunk.length);
- DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
- S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags, partsUploaded);
- executor.submit(partUploader);
- bytesUploaded.addAndGet(chunk.length);
- }
- executor.sleepTillEmpty();
- logger.info("All chunks uploaded for file {}, num of expected parts:{}, num of actual uploaded parts: {}", path.getFileName(), partNum, partsUploaded.get());
-
- if (partNum != partETags.size())
- throw new BackupRestoreException("Number of parts(" + partNum + ") does not match the uploaded parts(" + partETags.size() + ")");
-
- CompleteMultipartUploadResult resultS3MultiPartUploadComplete = new S3PartUploader(s3Client, part, partETags).completeUpload();
- checkSuccessfulUpload(resultS3MultiPartUploadComplete, path);
-
- if (logger.isDebugEnabled()) {
- final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
- final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
- final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
- logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
- }
-
- } catch (Exception e) {
- throw encounterError(path, new S3PartUploader(s3Client, part, partETags), e);
- } finally {
- IOUtils.closeQuietly(in);
- }
- }
-
- @Override
- public void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException {
-
- if (path.getSize() < chunkSize) {
- //Upload file without using multipart upload as it will be more efficient.
- if (logger.isDebugEnabled())
- logger.debug("Uploading file using put: {}", path.getRemotePath());
-
- try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
- Iterator<byte[]> chunkedStream = compress.compress(in, chunkSize);
- while (chunkedStream.hasNext()) {
- byteArrayOutputStream.write(chunkedStream.next());
- }
- byte[] chunk = byteArrayOutputStream.toByteArray();
- rateLimiter.acquire(chunk.length);
- ObjectMetadata objectMetadata = new ObjectMetadata();
- objectMetadata.setContentLength(chunk.length);
- PutObjectRequest putObjectRequest = new PutObjectRequest(config.getBackupPrefix(), path.getRemotePath(), new ByteArrayInputStream(chunk), objectMetadata);
- //Retry if failed.
- PutObjectResult upload = new BoundedExponentialRetryCallable<PutObjectResult>() {
- @Override
- public PutObjectResult retriableCall() throws Exception {
- return s3Client.putObject(putObjectRequest);
- }
- }.retriableCall();
-
- bytesUploaded.addAndGet(chunk.length);
-
- if (logger.isDebugEnabled())
- logger.debug("Successfully uploaded file with putObject: {} and etag: {}", path.getRemotePath(), upload.getETag());
- } catch (Exception e) {
- throw encounterError(path, e);
- } finally {
- IOUtils.closeQuietly(in);
- }
- } else
- uploadMultipart(path, in, chunkSize);
- }
-
-
- @Override
- public int getActivecount() {
- return executor.getActiveCount();
- }
-
-
- @Override
- /*
- Note: provides same information as getBytesUploaded() but it's meant for S3FileSystemMBean object types.
- */
- public long bytesUploaded() {
- return super.bytesUploaded.get();
- }
-
-
- @Override
- public long bytesDownloaded() {
- return bytesDownloaded.get();
- }
-
-}
\ No newline at end of file
+/*
+ * Copyright 2013 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.priam.aws;
+
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.S3ResponseMetadata;
+import com.amazonaws.services.s3.model.*;
+import com.google.common.base.Preconditions;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.name.Named;
+import com.netflix.priam.aws.auth.IS3Credential;
+import com.netflix.priam.backup.AbstractBackupPath;
+import com.netflix.priam.backup.BackupRestoreException;
+import com.netflix.priam.backup.DynamicRateLimiter;
+import com.netflix.priam.backup.RangeReadInputStream;
+import com.netflix.priam.compress.ChunkedStream;
+import com.netflix.priam.compress.CompressionType;
+import com.netflix.priam.compress.ICompression;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.identity.config.InstanceInfo;
+import com.netflix.priam.merics.BackupMetrics;
+import com.netflix.priam.notification.BackupNotificationMgr;
+import com.netflix.priam.utils.BoundedExponentialRetryCallable;
+import com.netflix.priam.utils.SystemUtils;
+import java.io.*;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Implementation of IBackupFileSystem for S3 */
+@Singleton
+public class S3FileSystem extends S3FileSystemBase {
+ private static final Logger logger = LoggerFactory.getLogger(S3FileSystem.class);
+ private static final long MAX_BUFFER_SIZE = 5L * 1024L * 1024L;
+ private final DynamicRateLimiter dynamicRateLimiter;
+
+ @Inject
+ public S3FileSystem(
+ @Named("awss3roleassumption") IS3Credential cred,
+ Provider<AbstractBackupPath> pathProvider,
+ ICompression compress,
+ final IConfiguration config,
+ BackupMetrics backupMetrics,
+ BackupNotificationMgr backupNotificationMgr,
+ InstanceInfo instanceInfo,
+ DynamicRateLimiter dynamicRateLimiter) {
+ super(pathProvider, compress, config, backupMetrics, backupNotificationMgr);
+ s3Client =
+ AmazonS3Client.builder()
+ .withCredentials(cred.getAwsCredentialProvider())
+ .withRegion(instanceInfo.getRegion())
+ .build();
+ this.dynamicRateLimiter = dynamicRateLimiter;
+ }
+
+ @Override
+ protected void downloadFileImpl(AbstractBackupPath path, String suffix)
+ throws BackupRestoreException {
+ String remotePath = path.getRemotePath();
+ File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix);
+ long size = super.getFileSize(remotePath);
+ final int bufferSize = Math.toIntExact(Math.min(MAX_BUFFER_SIZE, size));
+ try (BufferedInputStream is =
+ new BufferedInputStream(
+ new RangeReadInputStream(s3Client, getShard(), size, remotePath),
+ bufferSize);
+ BufferedOutputStream os =
+ new BufferedOutputStream(new FileOutputStream(localFile))) {
+ if (path.getCompression() == CompressionType.NONE) {
+ IOUtils.copyLarge(is, os);
+ } else {
+ compress.decompressAndClose(is, os);
+ }
+ } catch (Exception e) {
+ String err =
+ String.format(
+ "Failed to GET %s Bucket: %s Msg: %s",
+ remotePath, getShard(), e.getMessage());
+ throw new BackupRestoreException(err);
+ }
+ }
+
+ private ObjectMetadata getObjectMetadata(File file) {
+ ObjectMetadata ret = new ObjectMetadata();
+ long lastModified = file.lastModified();
+
+ if (lastModified != 0) {
+ ret.addUserMetadata("local-modification-time", Long.toString(lastModified));
+ }
+
+ long fileSize = file.length();
+ if (fileSize != 0) {
+ ret.addUserMetadata("local-size", Long.toString(fileSize));
+ }
+ return ret;
+ }
+
+ private long uploadMultipart(AbstractBackupPath path, Instant target)
+ throws BackupRestoreException {
+ Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
+ String remotePath = path.getRemotePath();
+ long chunkSize = getChunkSize(localPath);
+ String prefix = config.getBackupPrefix();
+ if (logger.isDebugEnabled())
+ logger.debug("Uploading to {}/{} with chunk size {}", prefix, remotePath, chunkSize);
+ File localFile = localPath.toFile();
+ InitiateMultipartUploadRequest initRequest =
+ new InitiateMultipartUploadRequest(prefix, remotePath)
+ .withObjectMetadata(getObjectMetadata(localFile));
+ String uploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId();
+ DataPart part = new DataPart(prefix, remotePath, uploadId);
+ List<PartETag> partETags = Collections.synchronizedList(new ArrayList<>());
+
+ try (InputStream in = new FileInputStream(localFile)) {
+ Iterator<byte[]> chunks = new ChunkedStream(in, chunkSize, path.getCompression());
+ int partNum = 0;
+ AtomicInteger partsPut = new AtomicInteger(0);
+ long compressedFileSize = 0;
+
+ while (chunks.hasNext()) {
+ byte[] chunk = chunks.next();
+ rateLimiter.acquire(chunk.length);
+ dynamicRateLimiter.acquire(path, target, chunk.length);
+ DataPart dp = new DataPart(++partNum, chunk, prefix, remotePath, uploadId);
+ S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags, partsPut);
+ compressedFileSize += chunk.length;
+ // TODO: output Future instead, collect them here, wait for all below
+ executor.submit(partUploader);
+ }
+
+ executor.sleepTillEmpty();
+ logger.info("{} done. part count: {} expected: {}", localFile, partsPut.get(), partNum);
+ Preconditions.checkState(partNum == partETags.size(), "part count mismatch");
+ CompleteMultipartUploadResult resultS3MultiPartUploadComplete =
+ new S3PartUploader(s3Client, part, partETags).completeUpload();
+ checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath);
+
+ if (logger.isDebugEnabled()) {
+ final S3ResponseMetadata info = s3Client.getCachedResponseMetadata(initRequest);
+ logger.debug("Request Id: {}, Host Id: {}", info.getRequestId(), info.getHostId());
+ }
+
+ return compressedFileSize;
+ } catch (Exception e) {
+ new S3PartUploader(s3Client, part, partETags).abortUpload();
+ throw new BackupRestoreException("Error uploading file: " + localPath.toString(), e);
+ }
+ }
+
+ protected long uploadFileImpl(AbstractBackupPath path, Instant target)
+ throws BackupRestoreException {
+ File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
+ if (localFile.length() >= config.getBackupChunkSize()) return uploadMultipart(path, target);
+ byte[] chunk = getFileContents(path);
+ // C* snapshots may have empty files. That is probably unintentional.
+ if (chunk.length > 0) {
+ rateLimiter.acquire(chunk.length);
+ dynamicRateLimiter.acquire(path, target, chunk.length);
+ }
+ try {
+ new BoundedExponentialRetryCallable<PutObjectResult>(1000, 10000, 5) {
+ @Override
+ public PutObjectResult retriableCall() {
+ return s3Client.putObject(generatePut(path, chunk));
+ }
+ }.call();
+ } catch (Exception e) {
+ throw new BackupRestoreException("Error uploading file: " + localFile.getName(), e);
+ }
+ return chunk.length;
+ }
+
+ private PutObjectRequest generatePut(AbstractBackupPath path, byte[] chunk) {
+ File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
+ ObjectMetadata metadata = getObjectMetadata(localFile);
+ metadata.setContentLength(chunk.length);
+ PutObjectRequest put =
+ new PutObjectRequest(
+ config.getBackupPrefix(),
+ path.getRemotePath(),
+ new ByteArrayInputStream(chunk),
+ metadata);
+ if (config.addMD5ToBackupUploads()) {
+ put.getMetadata().setContentMD5(SystemUtils.toBase64(SystemUtils.md5(chunk)));
+ }
+ return put;
+ }
+
+ private byte[] getFileContents(AbstractBackupPath path) throws BackupRestoreException {
+ File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile();
+ try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ InputStream in = new BufferedInputStream(new FileInputStream(localFile))) {
+ Iterator<byte[]> chunks =
+ new ChunkedStream(in, config.getBackupChunkSize(), path.getCompression());
+ while (chunks.hasNext()) {
+ byteArrayOutputStream.write(chunks.next());
+ }
+ return byteArrayOutputStream.toByteArray();
+ } catch (Exception e) {
+ throw new BackupRestoreException("Error reading file: " + localFile.getName(), e);
+ }
+ }
+}
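
generatePut() above attaches a Content-MD5 header when config.addMD5ToBackupUploads() is enabled, so S3 can verify the uploaded body and reject a corrupted single-shot put. A minimal sketch of assembling such a request, with plain JDK digest and Base64 classes in place of Priam's SystemUtils helpers (bucket and key are placeholders):

    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;
    import java.io.ByteArrayInputStream;
    import java.security.MessageDigest;
    import java.util.Base64;

    final class Md5PutSketch {
        // Builds a single-shot put request whose body S3 will check against Content-MD5.
        static PutObjectRequest buildPut(String bucket, String key, byte[] payload) throws Exception {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(payload.length);
            // S3 expects the base64-encoded MD5 digest of the payload in the Content-MD5 header.
            byte[] digest = MessageDigest.getInstance("MD5").digest(payload);
            metadata.setContentMD5(Base64.getEncoder().encodeToString(digest));
            return new PutObjectRequest(bucket, key, new ByteArrayInputStream(payload), metadata);
        }
    }
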
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java b/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java
index 6d651ac53..5703424f2 100755
--- a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java
@@ -1,377 +1,291 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.aws;
-
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
-import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.RateLimiter;
-import com.google.inject.Provider;
-import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.backup.AbstractBackupPath;
-import com.netflix.priam.backup.BackupRestoreException;
-import com.netflix.priam.backup.IBackupFileSystem;
-import com.netflix.priam.compress.ICompression;
-import com.netflix.priam.merics.BackupMetrics;
-import com.netflix.priam.notification.BackupEvent;
-import com.netflix.priam.notification.BackupNotificationMgr;
-import com.netflix.priam.notification.EventGenerator;
-import com.netflix.priam.notification.EventObserver;
-import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-public abstract class S3FileSystemBase implements IBackupFileSystem, EventGenerator<BackupEvent> {
- protected static final int MAX_CHUNKS = 10000;
- protected static final long MAX_BUFFERED_IN_STREAM_SIZE = 5 * 1024 * 1024;
- protected static final long UPLOAD_TIMEOUT = (2 * 60 * 60 * 1000L);
- private static final Logger logger = LoggerFactory.getLogger(S3FileSystemBase.class);
- //protected AtomicInteger uploadCount = new AtomicInteger();
- protected AtomicLong bytesUploaded = new AtomicLong(); //bytes uploaded per file
- //protected AtomicInteger downloadCount = new AtomicInteger();
- protected AtomicLong bytesDownloaded = new AtomicLong();
- protected BackupMetrics backupMetrics;
- protected AmazonS3 s3Client;
- protected IConfiguration config;
- protected Provider<AbstractBackupPath> pathProvider;
- protected ICompression compress;
- protected BlockingSubmitThreadPoolExecutor executor;
- protected RateLimiter rateLimiter; //a throttling mechanism, we can limit the amount of bytes uploaded to endpoint per second.
- private final CopyOnWriteArrayList<EventObserver<BackupEvent>> observers = new CopyOnWriteArrayList<>();
-
- public S3FileSystemBase(Provider<AbstractBackupPath> pathProvider,
- ICompression compress,
- final IConfiguration config,
- BackupMetrics backupMetrics,
- BackupNotificationMgr backupNotificationMgr) {
- this.pathProvider = pathProvider;
- this.compress = compress;
- this.config = config;
- this.backupMetrics = backupMetrics;
-
- int threads = config.getMaxBackupUploadThreads();
- LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(threads);
- this.executor = new BlockingSubmitThreadPoolExecutor(threads, queue, UPLOAD_TIMEOUT);
-
- double throttleLimit = config.getUploadThrottle();
- this.rateLimiter = RateLimiter.create(throttleLimit < 1 ? Double.MAX_VALUE : throttleLimit);
- this.addObserver(backupNotificationMgr);
- }
-
- public AmazonS3 getS3Client() {
- return s3Client;
- }
-
- /*
- * A means to change the default handle to the S3 client.
- */
- public void setS3Client(AmazonS3 client) {
- s3Client = client;
- }
-
- /**
- * Get S3 prefix which will be used to locate S3 files
- */
- protected String getPrefix(IConfiguration config) {
- String prefix;
- if (StringUtils.isNotBlank(config.getRestorePrefix()))
- prefix = config.getRestorePrefix();
- else
- prefix = config.getBackupPrefix();
-
- String[] paths = prefix.split(String.valueOf(S3BackupPath.PATH_SEP));
- return paths[0];
- }
-
- @Override
- public void cleanup() {
-
- AmazonS3 s3Client = getS3Client();
- String clusterPath = pathProvider.get().clusterPrefix("");
- logger.debug("Bucket: {}", config.getBackupPrefix());
- BucketLifecycleConfiguration lifeConfig = s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix());
- logger.debug("Got bucket:{} lifecycle.{}", config.getBackupPrefix(), lifeConfig);
- if (lifeConfig == null) {
- lifeConfig = new BucketLifecycleConfiguration();
- List<Rule> rules = Lists.newArrayList();
- lifeConfig.setRules(rules);
- }
- List<Rule> rules = lifeConfig.getRules();
- if (updateLifecycleRule(config, rules, clusterPath)) {
- if (rules.size() > 0) {
- lifeConfig.setRules(rules);
- s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig);
- } else
- s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix());
- }
-
- }
-
- private boolean updateLifecycleRule(IConfiguration config, List<Rule> rules, String prefix) {
- Rule rule = null;
- for (BucketLifecycleConfiguration.Rule lcRule : rules) {
- if (lcRule.getPrefix().equals(prefix)) {
- rule = lcRule;
- break;
- }
- }
- if (rule == null && config.getBackupRetentionDays() <= 0)
- return false;
- if (rule != null && rule.getExpirationInDays() == config.getBackupRetentionDays()) {
- logger.info("Cleanup rule already set");
- return false;
- }
- if (rule == null) {
- // Create a new rule
- rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(config.getBackupRetentionDays()).withPrefix(prefix);
- rule.setStatus(BucketLifecycleConfiguration.ENABLED);
- rule.setId(prefix);
- rules.add(rule);
- logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), rule.getExpirationInDays());
- } else if (config.getBackupRetentionDays() > 0) {
- logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), config.getBackupRetentionDays());
- rule.setExpirationInDays(config.getBackupRetentionDays());
- } else {
- logger.info("Removing cleanup rule for {}", rule.getPrefix());
- rules.remove(rule);
- }
- return true;
- }
-
- /*
- @param path - representation of the file uploaded
- @param start time of upload, in millisecs
- @param completion time of upload, in millsecs
- */
- private void postProcessingPerFile(AbstractBackupPath path, long startTimeInMilliSecs, long completedTimeInMilliSecs) {
- //Publish upload rate for each uploaded file
- try {
- long sizeInBytes = path.getSize();
- long elapseTimeInMillisecs = completedTimeInMilliSecs - startTimeInMilliSecs;
- long elapseTimeInSecs = elapseTimeInMillisecs / 1000; //converting millis to seconds as 1000m in 1 second
- long bytesReadPerSec = 0;
- Double speedInKBps = 0.0;
- if (elapseTimeInSecs > 0 && sizeInBytes > 0) {
- bytesReadPerSec = sizeInBytes / elapseTimeInSecs;
- speedInKBps = bytesReadPerSec / 1024D;
- } else {
- bytesReadPerSec = sizeInBytes; //we uploaded the whole file in less than a sec
- speedInKBps = (double) sizeInBytes;
- }
-
- logger.info("Upload rate for file: {}"
- + ", elapsse time in sec(s): {}"
- + ", KB per sec: {}",
- path.getFileName(), elapseTimeInSecs, speedInKBps);
- backupMetrics.recordUploadRate(sizeInBytes);
- } catch (Exception e) {
- logger.error("Post processing of file {} failed, not fatal.", path.getFileName(), e);
- }
- }
-
- /*
- Reinitializtion which should be performed before uploading a file
- */
- protected void reinitialize() {
- bytesUploaded = new AtomicLong(0); //initialize
- }
-
- /*
- @param file uploaded to S3
- @param a list of unique parts uploaded to S3 for file
- */
- protected void logDiagnosticInfo(AbstractBackupPath fileUploaded, CompleteMultipartUploadResult res) {
- File f = fileUploaded.getBackupFile();
- String fName = f.getAbsolutePath();
- logger.info("Uploaded file: {}, object eTag: {}", fName, res.getETag());
- }
-
- @Override
- public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
- reinitialize(); //perform before file upload
- long chunkSize = config.getBackupChunkSize();
- if (path.getSize() > 0)
- chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1)) : chunkSize; //compute the size of each block we will upload to endpoint
-
- logger.info("Uploading to {}/{} with chunk size {}", config.getBackupPrefix(), path.getRemotePath(), chunkSize);
-
- long startTime = System.nanoTime(); //initialize for each file upload
- notifyEventStart(new BackupEvent(path));
- uploadFile(path, in, chunkSize);
- long completedTime = System.nanoTime();
- postProcessingPerFile(path, TimeUnit.NANOSECONDS.toMillis(startTime), TimeUnit.NANOSECONDS.toMillis(completedTime));
- notifyEventSuccess(new BackupEvent(path));
- backupMetrics.incrementValidUploads();
- }
-
- protected void checkSuccessfulUpload(CompleteMultipartUploadResult resultS3MultiPartUploadComplete, AbstractBackupPath path) throws BackupRestoreException {
- if (null != resultS3MultiPartUploadComplete && null != resultS3MultiPartUploadComplete.getETag()) {
- String eTagObjectId = resultS3MultiPartUploadComplete.getETag(); //unique id of the whole object
- logDiagnosticInfo(path, resultS3MultiPartUploadComplete);
- } else {
- this.backupMetrics.incrementInvalidUploads();
- throw new BackupRestoreException("Error uploading file as ETag or CompleteMultipartUploadResult is NULL -" + path.getFileName());
- }
- }
-
-
- protected BackupRestoreException encounterError(AbstractBackupPath path, S3PartUploader s3PartUploader, Exception e) {
- s3PartUploader.abortUpload();
- return encounterError(path, e);
- }
-
- protected BackupRestoreException encounterError(AbstractBackupPath path, Exception e) {
- this.backupMetrics.incrementInvalidUploads();
- if (e instanceof AmazonS3Exception) {
- AmazonS3Exception a = (AmazonS3Exception) e;
- String amazoneErrorCode = a.getErrorCode();
- if (amazoneErrorCode != null && !amazoneErrorCode.isEmpty()) {
- if (amazoneErrorCode.equalsIgnoreCase("slowdown")) {
- backupMetrics.incrementAwsSlowDownException(1);
- logger.warn("Received slow down from AWS when uploading file: {}", path.getFileName());
- }
- }
- }
-
- logger.error("Error uploading file {}, a datapart was not uploaded.", path.getFileName(), e);
- notifyEventFailure(new BackupEvent(path));
- return new BackupRestoreException("Error uploading file " + path.getFileName(), e);
- }
-
- abstract void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException;
-
- /**
- * This method does exactly as other download method.(Supposed to be overridden)
- * filePath parameter provides the diskPath of the downloaded file.
- * This path can be used to correlate the files which are Streamed In
- * during Incremental Restores
- */
- @Override
- public void download(AbstractBackupPath path, OutputStream os,
- String filePath) throws BackupRestoreException {
- try {
- // Calling original Download method
- download(path, os);
- } catch (Exception e) {
- throw new BackupRestoreException(e.getMessage(), e);
- }
-
- }
-
- @Override
- public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException {
- logger.info("Downloading {} from S3 bucket {}", path.getRemotePath(), getPrefix(this.config));
- long contentLen = s3Client.getObjectMetadata(getPrefix(config), path.getRemotePath()).getContentLength();
- path.setSize(contentLen);
- try {
- downloadFile(path, os);
- bytesDownloaded.addAndGet(contentLen);
- backupMetrics.incrementValidDownloads();
- } catch (BackupRestoreException e) {
- backupMetrics.incrementInvalidDownloads();
- throw e;
- }
- }
-
- protected abstract void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException;
-
- @Override
- public long getBytesUploaded() {
- return bytesUploaded.get();
- }
-
- @Override
- public long getAWSSlowDownExceptionCounter() {
- return backupMetrics.getAwsSlowDownException();
- }
-
- public long downloadCount() {
- return backupMetrics.getValidDownloads();
- }
-
- public long uploadCount() {
- return backupMetrics.getValidUploads();
- }
-
- @Override
- public void shutdown() {
- if (executor != null)
- executor.shutdown();
-
- }
-
- @Override
- public Iterator<AbstractBackupPath> listPrefixes(Date date) {
- return new S3PrefixIterator(config, pathProvider, s3Client, date);
- }
-
- @Override
- public Iterator<AbstractBackupPath> list(String path, Date start, Date till) {
- return new S3FileIterator(pathProvider, s3Client, path, start, till);
- }
-
-
- @Override
- public final void addObserver(EventObserver<BackupEvent> observer) {
- if (observer == null)
- throw new NullPointerException("observer must not be null.");
-
- observers.addIfAbsent(observer);
- }
-
- @Override
- public void removeObserver(EventObserver<BackupEvent> observer) {
- if (observer == null)
- throw new NullPointerException("observer must not be null.");
-
- observers.remove(observer);
- }
-
- @Override
- public void notifyEventStart(BackupEvent event) {
- observers.forEach(eventObserver -> eventObserver.updateEventStart(event));
- }
-
- @Override
- public void notifyEventSuccess(BackupEvent event) {
- observers.forEach(eventObserver -> eventObserver.updateEventSuccess(event));
- }
-
- @Override
- public void notifyEventFailure(BackupEvent event) {
- observers.forEach(eventObserver -> eventObserver.updateEventFailure(event));
- }
-
- @Override
- public void notifyEventStop(BackupEvent event) {
- observers.forEach(eventObserver -> eventObserver.updateEventStop(event));
- }
-}
+/**
+ * Copyright 2017 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.priam.aws;
+
+import com.amazonaws.AmazonClientException;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
+import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.lifecycle.*;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.RateLimiter;
+import com.google.inject.Provider;
+import com.netflix.priam.backup.AbstractBackupPath;
+import com.netflix.priam.backup.AbstractFileSystem;
+import com.netflix.priam.backup.BackupRestoreException;
+import com.netflix.priam.compress.ICompression;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.merics.BackupMetrics;
+import com.netflix.priam.notification.BackupNotificationMgr;
+import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
+import java.nio.file.Path;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class S3FileSystemBase extends AbstractFileSystem {
+ private static final int MAX_CHUNKS = 9995; // 10K is AWS limit, minus a small buffer
+ private static final Logger logger = LoggerFactory.getLogger(S3FileSystemBase.class);
+ AmazonS3 s3Client;
+ final IConfiguration config;
+ final ICompression compress;
+ final BlockingSubmitThreadPoolExecutor executor;
+ final RateLimiter rateLimiter;
+ private final RateLimiter objectExistLimiter;
+
+ S3FileSystemBase(
+ Provider<AbstractBackupPath> pathProvider,
+ ICompression compress,
+ final IConfiguration config,
+ BackupMetrics backupMetrics,
+ BackupNotificationMgr backupNotificationMgr) {
+ super(config, backupMetrics, backupNotificationMgr, pathProvider);
+ this.compress = compress;
+ this.config = config;
+
+ int threads = config.getBackupThreads();
+ LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(threads);
+ this.executor =
+ new BlockingSubmitThreadPoolExecutor(threads, queue, config.getUploadTimeout());
+
+ // a throttling mechanism, we can limit the amount of bytes uploaded to endpoint per second.
+ this.rateLimiter = RateLimiter.create(1);
+ // a throttling mechanism, we can limit the amount of S3 API calls to the endpoint per second.
+ this.objectExistLimiter = RateLimiter.create(1);
+ configChangeListener();
+ }
+
+ /*
+ Call this method to change the configuration in runtime via callback.
+ */
+ public void configChangeListener() {
+ int objectExistLimit = config.getRemoteFileSystemObjectExistsThrottle();
+ objectExistLimiter.setRate(objectExistLimit < 1 ? Double.MAX_VALUE : objectExistLimit);
+
+ double throttleLimit = config.getUploadThrottle();
+ rateLimiter.setRate(throttleLimit < 1 ? Double.MAX_VALUE : throttleLimit);
+
+ logger.info(
+ "Updating rateLimiters: s3UploadThrottle: {}, objectExistLimiter: {}",
+ rateLimiter.getRate(),
+ objectExistLimiter.getRate());
+ }
+
+ private AmazonS3 getS3Client() {
+ return s3Client;
+ }
+
+ /*
+ * A means to change the default handle to the S3 client.
+ */
+ public void setS3Client(AmazonS3 client) {
+ s3Client = client;
+ }
+
+ @Override
+ public void cleanup() {
+
+ AmazonS3 s3Client = getS3Client();
+ String clusterPath = pathProvider.get().clusterPrefix("");
+ logger.debug("Bucket: {}", config.getBackupPrefix());
+ BucketLifecycleConfiguration lifeConfig =
+ s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix());
+ logger.debug("Got bucket:{} lifecycle.{}", config.getBackupPrefix(), lifeConfig);
+ if (lifeConfig == null) {
+ lifeConfig = new BucketLifecycleConfiguration();
+ List<Rule> rules = Lists.newArrayList();
+ lifeConfig.setRules(rules);
+ }
+
+ List<Rule> rules = lifeConfig.getRules();
+
+ if (updateLifecycleRule(config, rules, clusterPath)) {
+ if (rules.size() > 0) {
+ lifeConfig.setRules(rules);
+ s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig);
+ } else s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix());
+ }
+ }
+
+ // Dummy class to get Prefix. - Why oh why AWS you can't give the details!!
+ private class PrefixVisitor implements LifecyclePredicateVisitor {
+ String prefix;
+
+ @Override
+ public void visit(LifecyclePrefixPredicate lifecyclePrefixPredicate) {
+ prefix = lifecyclePrefixPredicate.getPrefix();
+ }
+
+ @Override
+ public void visit(LifecycleTagPredicate lifecycleTagPredicate) {}
+
+ @Override
+ public void visit(
+ LifecycleObjectSizeGreaterThanPredicate lifecycleObjectSizeGreaterThanPredicate) {}
+
+ @Override
+ public void visit(LifecycleAndOperator lifecycleAndOperator) {}
+
+ @Override
+ public void visit(
+ LifecycleObjectSizeLessThanPredicate lifecycleObjectSizeLessThanPredicate) {}
+ }
+
+ private Optional getBucketLifecycleRule(List rules, String prefix) {
+ if (rules == null || rules.isEmpty()) return Optional.empty();
+
+ for (Rule rule : rules) {
+ String rulePrefix = "";
+ if (rule.getFilter() != null) {
+ PrefixVisitor prefixVisitor = new PrefixVisitor();
+ rule.getFilter().getPredicate().accept(prefixVisitor);
+ rulePrefix = prefixVisitor.prefix;
+ } else if (rule.getPrefix() != null) {
+                // Remain backwards compatible here.
+ rulePrefix = rule.getPrefix();
+ }
+ if (prefix.equalsIgnoreCase(rulePrefix)) {
+ return Optional.of(rule);
+ }
+ }
+
+ return Optional.empty();
+ }
+
+ private boolean updateLifecycleRule(IConfiguration config, List rules, String prefix) {
+ Optional rule = getBucketLifecycleRule(rules, prefix);
+ // No need to update the rule as it never existed and retention is not set.
+ if (!rule.isPresent() && config.getBackupRetentionDays() <= 0) return false;
+
+ // Rule not required as retention days is zero or negative.
+ if (rule.isPresent() && config.getBackupRetentionDays() <= 0) {
+ logger.warn(
+ "Removing the rule for backup retention on prefix: {} as retention is set to [{}] days. Only positive values are supported by S3!!",
+ prefix,
+ config.getBackupRetentionDays());
+ rules.remove(rule.get());
+ return true;
+ }
+
+ // Rule present and is current.
+ if (rule.isPresent()
+ && rule.get().getExpirationInDays() == config.getBackupRetentionDays()
+ && rule.get().getStatus().equalsIgnoreCase(BucketLifecycleConfiguration.ENABLED)) {
+ logger.info(
+ "Cleanup rule already set on prefix: {} with retention period: [{}] days",
+ prefix,
+ config.getBackupRetentionDays());
+ return false;
+ }
+
+ if (!rule.isPresent()) {
+ // Create a new rule
+ rule = Optional.of(new BucketLifecycleConfiguration.Rule());
+ rules.add(rule.get());
+ }
+
+ rule.get().setStatus(BucketLifecycleConfiguration.ENABLED);
+ rule.get().setExpirationInDays(config.getBackupRetentionDays());
+ rule.get().setFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix)));
+ rule.get().setId(prefix);
+ logger.info(
+ "Setting cleanup rule for prefix: {} with retention period: [{}] days",
+ prefix,
+ config.getBackupRetentionDays());
+ return true;
+ }
+
+ void checkSuccessfulUpload(
+ CompleteMultipartUploadResult resultS3MultiPartUploadComplete, Path localPath)
+ throws BackupRestoreException {
+ if (null != resultS3MultiPartUploadComplete
+ && null != resultS3MultiPartUploadComplete.getETag()) {
+ logger.info(
+ "Uploaded file: {}, object eTag: {}",
+ localPath,
+ resultS3MultiPartUploadComplete.getETag());
+ } else {
+ throw new BackupRestoreException(
+ "Error uploading file as ETag or CompleteMultipartUploadResult is NULL -"
+ + localPath);
+ }
+ }
+
+ @Override
+ public long getFileSize(String remotePath) throws BackupRestoreException {
+ return s3Client.getObjectMetadata(getShard(), remotePath).getContentLength();
+ }
+
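+    // Existence checks are throttled via objectExistLimiter so callers cannot flood the S3 API.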
+ @Override
+ protected boolean doesRemoteFileExist(Path remotePath) {
+ objectExistLimiter.acquire();
+ boolean exists = false;
+ try {
+ exists = s3Client.doesObjectExist(getShard(), remotePath.toString());
+ } catch (AmazonClientException ex) {
+ // No point throwing this exception up.
+ logger.error(
+ "Exception while checking existence of object: {}. Error: {}",
+ remotePath,
+ ex.getMessage());
+ }
+
+ return exists;
+ }
+
+ @Override
+ public void shutdown() {
+ if (executor != null) executor.shutdown();
+ }
+
+ @Override
+ public Iterator listFileSystem(String prefix, String delimiter, String marker) {
+ return new S3Iterator(s3Client, getShard(), prefix, delimiter, marker);
+ }
+
+ @Override
+ public void deleteFiles(List remotePaths) throws BackupRestoreException {
+ if (remotePaths.isEmpty()) return;
+
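+        // Batch delete using the S3 multi-object delete API; quiet mode suppresses per-key results in the response.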
+ try {
+ List keys =
+ remotePaths
+ .stream()
+ .map(
+ remotePath ->
+ new DeleteObjectsRequest.KeyVersion(
+ remotePath.toString()))
+ .collect(Collectors.toList());
+ s3Client.deleteObjects(
+ new DeleteObjectsRequest(getShard()).withKeys(keys).withQuiet(true));
+ logger.info("Deleted {} objects from S3", remotePaths.size());
+ } catch (Exception e) {
+ logger.error(
+ "Error while trying to delete [{}] the objects from S3: {}",
+ remotePaths.size(),
+ e.getMessage());
+ throw new BackupRestoreException(e + " while trying to delete the objects");
+ }
+ }
+
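+    /*
+     * Multipart chunk size: split large files so the part count stays under MAX_CHUNKS, but never
+     * go below the configured backup chunk size.
+     */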
+ final long getChunkSize(Path path) {
+ return Math.max(path.toFile().length() / MAX_CHUNKS, config.getBackupChunkSize());
+ }
+}
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java b/priam/src/main/java/com/netflix/priam/aws/S3Iterator.java
similarity index 50%
rename from priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java
rename to priam/src/main/java/com/netflix/priam/aws/S3Iterator.java
index a803216c8..0eaf4052a 100644
--- a/priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3Iterator.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2013 Netflix, Inc.
+ * Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
* limitations under the License.
*
*/
+
package com.netflix.priam.aws;
import com.amazonaws.services.s3.AmazonS3;
@@ -21,40 +22,51 @@
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.collect.Lists;
-import com.google.inject.Provider;
-import com.netflix.priam.backup.AbstractBackupPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Date;
import java.util.Iterator;
import java.util.List;
+import org.apache.commons.lang3.StringUtils;
/**
- * Iterator representing list of backup files available on S3
+ * Iterate over the S3 file system. This is primarily required to find the manifest file for
+ * restore and to download incrementals. Created by aagrawal on 11/30/18.
*/
-public class S3FileIterator implements Iterator {
- private static final Logger logger = LoggerFactory.getLogger(S3FileIterator.class);
- private final Provider pathProvider;
- private final AmazonS3 s3Client;
- private final Date start;
- private final Date till;
- private Iterator iterator;
+public class S3Iterator implements Iterator {
+ private Iterator iterator;
private ObjectListing objectListing;
+ private final AmazonS3 s3Client;
+ private final String bucket;
+ private final String prefix;
+ private final String delimiter;
+ private final String marker;
- public S3FileIterator(Provider pathProvider, AmazonS3 s3Client, String path, Date start, Date till) {
- this.start = start;
- this.till = till;
- this.pathProvider = pathProvider;
- ListObjectsRequest listReq = new ListObjectsRequest();
- String[] paths = path.split(String.valueOf(S3BackupPath.PATH_SEP));
- listReq.setBucketName(paths[0]);
- listReq.setPrefix(pathProvider.get().remotePrefix(start, till, path));
+ public S3Iterator(
+ AmazonS3 s3Client, String bucket, String prefix, String delimiter, String marker) {
this.s3Client = s3Client;
- objectListing = s3Client.listObjects(listReq);
+ this.bucket = bucket;
+ this.prefix = prefix;
+ this.delimiter = delimiter;
+ this.marker = marker;
iterator = createIterator();
}
+ private void initListing() {
+ ListObjectsRequest listReq = new ListObjectsRequest();
+ listReq.setBucketName(bucket);
+ listReq.setPrefix(prefix);
+ if (StringUtils.isNotBlank(delimiter)) listReq.setDelimiter(delimiter);
+ if (StringUtils.isNotBlank(marker)) listReq.setMarker(marker);
+ objectListing = s3Client.listObjects(listReq);
+ }
+
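+    // Materialize the keys of the current listing page; hasNext() pages forward while the listing is truncated.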
+ private Iterator createIterator() {
+ if (objectListing == null) initListing();
+ List temp = Lists.newArrayList();
+ for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
+ temp.add(summary.getKey());
+ }
+ return temp.iterator();
+ }
+
@Override
public boolean hasNext() {
if (iterator.hasNext()) {
@@ -64,32 +76,12 @@ public boolean hasNext() {
objectListing = s3Client.listNextBatchOfObjects(objectListing);
iterator = createIterator();
}
-
}
return iterator.hasNext();
}
- private Iterator createIterator() {
- List temp = Lists.newArrayList();
- for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
- AbstractBackupPath path = pathProvider.get();
- path.parseRemote(summary.getKey());
- logger.debug("New key {} path = {} start: {} end: {} my {}", summary.getKey(), path.getRemotePath(), start, till, path.getTime());
- if ((path.getTime().after(start) && path.getTime().before(till)) || path.getTime().equals(start)) {
- temp.add(path);
- logger.debug("Added key {}", summary.getKey());
- }
- }
- return temp.iterator();
- }
-
@Override
- public AbstractBackupPath next() {
+ public String next() {
return iterator.next();
}
-
- @Override
- public void remove() {
- throw new IllegalStateException();
- }
}
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java b/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java
index 1d7c1250d..5a6a04ae6 100644
--- a/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java
+++ b/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java
@@ -20,40 +20,40 @@
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import com.netflix.priam.backup.BackupRestoreException;
-import com.netflix.priam.utils.RetryableCallable;
+import com.netflix.priam.utils.BoundedExponentialRetryCallable;
import com.netflix.priam.utils.SystemUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.io.ByteArrayInputStream;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-public class S3PartUploader extends RetryableCallable {
+public class S3PartUploader extends BoundedExponentialRetryCallable {
private final AmazonS3 client;
- private DataPart dataPart;
- private List partETags;
- private AtomicInteger partsUploaded = null; //num of data parts successfully uploaded
+ private final DataPart dataPart;
+ private final List partETags;
+ private AtomicInteger partsUploaded = null; // num of data parts successfully uploaded
private static final Logger logger = LoggerFactory.getLogger(S3PartUploader.class);
private static final int MAX_RETRIES = 5;
+ private static final int DEFAULT_MIN_SLEEP_MS = 200;
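+    // Retries now back off exponentially, starting at DEFAULT_MIN_SLEEP_MS and capped at MAX_SLEEP,
+    // instead of the fixed wait used by RetryableCallable.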
public S3PartUploader(AmazonS3 client, DataPart dp, List partETags) {
- super(MAX_RETRIES, RetryableCallable.DEFAULT_WAIT_TIME);
+ super(DEFAULT_MIN_SLEEP_MS, BoundedExponentialRetryCallable.MAX_SLEEP, MAX_RETRIES);
this.client = client;
this.dataPart = dp;
this.partETags = partETags;
}
- public S3PartUploader(AmazonS3 client, DataPart dp, List partETags, AtomicInteger partsUploaded) {
- super(MAX_RETRIES, RetryableCallable.DEFAULT_WAIT_TIME);
+ public S3PartUploader(
+ AmazonS3 client, DataPart dp, List partETags, AtomicInteger partsUploaded) {
+ super(DEFAULT_MIN_SLEEP_MS, BoundedExponentialRetryCallable.MAX_SLEEP, MAX_RETRIES);
this.client = client;
this.dataPart = dp;
this.partETags = partETags;
this.partsUploaded = partsUploaded;
}
-
private Void uploadPart() throws AmazonClientException, BackupRestoreException {
UploadPartRequest req = new UploadPartRequest();
req.setBucketName(dataPart.getBucketName());
@@ -66,27 +66,35 @@ private Void uploadPart() throws AmazonClientException, BackupRestoreException {
UploadPartResult res = client.uploadPart(req);
PartETag partETag = res.getPartETag();
if (!partETag.getETag().equals(SystemUtils.toHex(dataPart.getMd5())))
- throw new BackupRestoreException("Unable to match MD5 for part " + dataPart.getPartNo());
+ throw new BackupRestoreException(
+ "Unable to match MD5 for part " + dataPart.getPartNo());
partETags.add(partETag);
- if (this.partsUploaded != null)
- this.partsUploaded.incrementAndGet();
+ if (this.partsUploaded != null) this.partsUploaded.incrementAndGet();
return null;
}
public CompleteMultipartUploadResult completeUpload() throws BackupRestoreException {
- CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID(), partETags);
+ CompleteMultipartUploadRequest compRequest =
+ new CompleteMultipartUploadRequest(
+ dataPart.getBucketName(),
+ dataPart.getS3key(),
+ dataPart.getUploadID(),
+ partETags);
return client.completeMultipartUpload(compRequest);
}
// Abort
public void abortUpload() {
- AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID());
+ AbortMultipartUploadRequest abortRequest =
+ new AbortMultipartUploadRequest(
+ dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID());
client.abortMultipartUpload(abortRequest);
}
@Override
public Void retriableCall() throws AmazonClientException, BackupRestoreException {
- logger.debug("Picked up part {} size {}", dataPart.getPartNo(), dataPart.getPartData().length);
+ logger.debug(
+ "Picked up part {} size {}", dataPart.getPartNo(), dataPart.getPartData().length);
return uploadPart();
}
}
diff --git a/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java b/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java
deleted file mode 100644
index 649486b33..000000000
--- a/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright 2013 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package com.netflix.priam.aws;
-
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.ListObjectsRequest;
-import com.amazonaws.services.s3.model.ObjectListing;
-import com.google.common.collect.Lists;
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.backup.AbstractBackupPath;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-
-/**
- * Class to iterate over prefixes (S3 Common prefixes) upto
- * the token element in the path. The abstract path generated by this class
- * is partial (does not have all data).
- */
-public class S3PrefixIterator implements Iterator {
- private static final Logger logger = LoggerFactory.getLogger(S3PrefixIterator.class);
- private final IConfiguration config;
- private final AmazonS3 s3Client;
- private final Provider pathProvider;
- private Iterator iterator;
-
- private String bucket = "";
- private String clusterPath = "";
- private SimpleDateFormat datefmt = new SimpleDateFormat("yyyyMMdd");
- private ObjectListing objectListing = null;
- Date date;
-
- @Inject
- public S3PrefixIterator(IConfiguration config, Provider pathProvider, AmazonS3 s3Client, Date date) {
- this.config = config;
- this.pathProvider = pathProvider;
- this.s3Client = s3Client;
- this.date = date;
- String path = "";
- if (StringUtils.isNotBlank(config.getRestorePrefix()))
- path = config.getRestorePrefix();
- else
- path = config.getBackupPrefix();
-
- String[] paths = path.split(String.valueOf(S3BackupPath.PATH_SEP));
- bucket = paths[0];
- this.clusterPath = remotePrefix(path);
- iterator = createIterator();
- }
-
- private void initListing() {
- ListObjectsRequest listReq = new ListObjectsRequest();
- // Get list of tokens
- listReq.setBucketName(bucket);
- listReq.setPrefix(clusterPath);
- listReq.setDelimiter(String.valueOf(AbstractBackupPath.PATH_SEP));
- logger.info("Using cluster prefix for searching tokens: {}", clusterPath);
- objectListing = s3Client.listObjects(listReq);
-
- }
-
- private Iterator createIterator() {
- if (objectListing == null)
- initListing();
- List temp = Lists.newArrayList();
- for (String summary : objectListing.getCommonPrefixes()) {
- if (pathExistsForDate(summary, datefmt.format(date))) {
- AbstractBackupPath path = pathProvider.get();
- path.parsePartialPrefix(summary);
- temp.add(path);
- }
- }
- return temp.iterator();
- }
-
- @Override
- public boolean hasNext() {
- if (iterator.hasNext()) {
- return true;
- } else {
- while (objectListing.isTruncated() && !iterator.hasNext()) {
- objectListing = s3Client.listNextBatchOfObjects(objectListing);
- iterator = createIterator();
- }
- }
- return iterator.hasNext();
- }
-
- @Override
- public AbstractBackupPath next() {
- return iterator.next();
- }
-
- @Override
- public void remove() {
- }
-
- /**
- * Get remote prefix upto the token
- */
- private String remotePrefix(String location) {
- StringBuffer buff = new StringBuffer();
- String[] elements = location.split(String.valueOf(S3BackupPath.PATH_SEP));
- if (elements.length <= 1) {
- buff.append(config.getBackupLocation()).append(S3BackupPath.PATH_SEP);
- buff.append(config.getDC()).append(S3BackupPath.PATH_SEP);
- buff.append(config.getAppName()).append(S3BackupPath.PATH_SEP);
- } else {
- assert elements.length >= 4 : "Too few elements in path " + location;
- buff.append(elements[1]).append(S3BackupPath.PATH_SEP);
- buff.append(elements[2]).append(S3BackupPath.PATH_SEP);
- buff.append(elements[3]).append(S3BackupPath.PATH_SEP);
- }
- return buff.toString();
- }
-
- /**
- * Check to see if the path exists for the date
- */
- private boolean pathExistsForDate(String tprefix, String datestr) {
- ListObjectsRequest listReq = new ListObjectsRequest();
- // Get list of tokens
- listReq.setBucketName(bucket);
- listReq.setPrefix(tprefix + datestr);
- ObjectListing listing;
- listing = s3Client.listObjects(listReq);
- return listing.getObjectSummaries().size() > 0;
- }
-
-}
diff --git a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java
index 128042435..1d0fe8a08 100644
--- a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java
+++ b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java
@@ -25,29 +25,36 @@
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
import com.netflix.priam.identity.PriamInstance;
-
import java.util.*;
-/**
- * DAO for handling Instance identity information such as token, zone, region
- */
+/** DAO for handling Instance identity information such as token, zone, region */
@Singleton
public class SDBInstanceData {
public static class Attributes {
- public final static String APP_ID = "appId";
- public final static String ID = "id";
- public final static String INSTANCE_ID = "instanceId";
- public final static String TOKEN = "token";
- public final static String AVAILABILITY_ZONE = "availabilityZone";
- public final static String ELASTIC_IP = "elasticIP";
- public final static String UPDATE_TS = "updateTimestamp";
- public final static String LOCATION = "location";
- public final static String HOSTNAME = "hostname";
+ public static final String APP_ID = "appId";
+ public static final String ID = "id";
+ public static final String INSTANCE_ID = "instanceId";
+ public static final String TOKEN = "token";
+ public static final String AVAILABILITY_ZONE = "availabilityZone";
+ public static final String ELASTIC_IP = "elasticIP";
+ public static final String UPDATE_TS = "updateTimestamp";
+ public static final String LOCATION = "location";
+ public static final String HOSTNAME = "hostname";
}
public static final String DOMAIN = "InstanceIdentity";
- public static final String ALL_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
- public static final String INSTANCE_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s' and " + Attributes.LOCATION + "='%s' and " + Attributes.ID + "='%d'";
+ public static final String ALL_QUERY =
+ "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'";
+ public static final String INSTANCE_QUERY =
+ "select * from "
+ + DOMAIN
+ + " where "
+ + Attributes.APP_ID
+ + "='%s' and "
+ + Attributes.LOCATION
+ + "='%s' and "
+ + Attributes.ID
+ + "='%d'";
private final ICredential provider;
private final IConfiguration configuration;
@@ -67,10 +74,11 @@ public SDBInstanceData(ICredential provider, IConfiguration configuration) {
*/
public PriamInstance getInstance(String app, String dc, int id) {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
- SelectRequest request = new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id));
+ SelectRequest request =
+ new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id))
+ .withConsistentRead(true);
SelectResult result = simpleDBClient.select(request);
- if (result.getItems().size() == 0)
- return null;
+ if (result.getItems().size() == 0) return null;
return transform(result.getItems().get(0));
}
@@ -82,16 +90,17 @@ public PriamInstance getInstance(String app, String dc, int id) {
*/
public Set getAllIds(String app) {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
- Set inslist = new HashSet();
+ Set inslist = new HashSet<>();
String nextToken = null;
do {
- SelectRequest request = new SelectRequest(String.format(ALL_QUERY, app));
- request.setNextToken(nextToken);
+ SelectRequest request =
+ new SelectRequest(String.format(ALL_QUERY, app))
+ .withConsistentRead(true)
+ .withNextToken(nextToken);
SelectResult result = simpleDBClient.select(request);
nextToken = result.getNextToken();
- Iterator- itemiter = result.getItems().iterator();
- while (itemiter.hasNext()) {
- inslist.add(transform(itemiter.next()));
+ for (Item item : result.getItems()) {
+ inslist.add(transform(item));
}
} while (nextToken != null);
@@ -101,24 +110,36 @@ public Set getAllIds(String app) {
/**
* Create a new instance entry in SimpleDB
*
- * @param instance
- * @throws AmazonServiceException
+ * @param orig Original instance used for validation
+ * @param inst Instance entry to be created.
+ * @throws AmazonServiceException If unable to write to Simple DB because of any error.
*/
- public void createInstance(PriamInstance instance) throws AmazonServiceException {
- AmazonSimpleDB simpleDBClient = getSimpleDBClient();
- PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance));
- simpleDBClient.putAttributes(putReq);
+ public void updateInstance(PriamInstance orig, PriamInstance inst)
+ throws AmazonServiceException {
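+        // Guard the write with UpdateConditions on the original entry's instanceId and token
+        // (optimistic concurrency on SimpleDB).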
+ PutAttributesRequest putReq =
+ new PutAttributesRequest(DOMAIN, getKey(inst), createAttributesToRegister(inst))
+ .withExpected(
+ new UpdateCondition()
+ .withName(Attributes.INSTANCE_ID)
+ .withValue(orig.getInstanceId()))
+ .withExpected(
+ new UpdateCondition()
+ .withName(Attributes.TOKEN)
+ .withValue(orig.getToken()));
+ getSimpleDBClient().putAttributes(putReq);
}
/**
* Register a new instance. Registration will fail if a prior entry exists
*
- * @param instance
- * @throws AmazonServiceException
+ * @param instance Instance entry to be registered.
+ * @throws AmazonServiceException If unable to write to Simple DB because of any error.
*/
public void registerInstance(PriamInstance instance) throws AmazonServiceException {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
- PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance));
+ PutAttributesRequest putReq =
+ new PutAttributesRequest(
+ DOMAIN, getKey(instance), createAttributesToRegister(instance));
UpdateCondition expected = new UpdateCondition();
expected.setName(Attributes.INSTANCE_ID);
expected.setExists(false);
@@ -134,27 +155,33 @@ public void registerInstance(PriamInstance instance) throws AmazonServiceExcepti
*/
public void deregisterInstance(PriamInstance instance) throws AmazonServiceException {
AmazonSimpleDB simpleDBClient = getSimpleDBClient();
- DeleteAttributesRequest delReq = new DeleteAttributesRequest(DOMAIN, getKey(instance), createAttributesToDeRegister(instance));
+ DeleteAttributesRequest delReq =
+ new DeleteAttributesRequest(
+ DOMAIN, getKey(instance), createAttributesToDeRegister(instance));
simpleDBClient.deleteAttributes(delReq);
}
protected List createAttributesToRegister(PriamInstance instance) {
instance.setUpdatetime(new Date().getTime());
- List attrs = new ArrayList();
- attrs.add(new ReplaceableAttribute(Attributes.INSTANCE_ID, instance.getInstanceId(), false));
+ List attrs = new ArrayList<>();
+ attrs.add(
+ new ReplaceableAttribute(Attributes.INSTANCE_ID, instance.getInstanceId(), false));
attrs.add(new ReplaceableAttribute(Attributes.TOKEN, instance.getToken(), true));
attrs.add(new ReplaceableAttribute(Attributes.APP_ID, instance.getApp(), true));
- attrs.add(new ReplaceableAttribute(Attributes.ID, Integer.toString(instance.getId()), true));
+ attrs.add(
+ new ReplaceableAttribute(Attributes.ID, Integer.toString(instance.getId()), true));
attrs.add(new ReplaceableAttribute(Attributes.AVAILABILITY_ZONE, instance.getRac(), true));
attrs.add(new ReplaceableAttribute(Attributes.ELASTIC_IP, instance.getHostIP(), true));
attrs.add(new ReplaceableAttribute(Attributes.HOSTNAME, instance.getHostName(), true));
attrs.add(new ReplaceableAttribute(Attributes.LOCATION, instance.getDC(), true));
- attrs.add(new ReplaceableAttribute(Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime()), true));
+ attrs.add(
+ new ReplaceableAttribute(
+ Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime()), true));
return attrs;
}
protected List createAttributesToDeRegister(PriamInstance instance) {
- List attrs = new ArrayList();
+ List attrs = new ArrayList<>();
attrs.add(new Attribute(Attributes.INSTANCE_ID, instance.getInstanceId()));
attrs.add(new Attribute(Attributes.TOKEN, instance.getToken()));
attrs.add(new Attribute(Attributes.APP_ID, instance.getApp()));
@@ -175,25 +202,16 @@ protected List createAttributesToDeRegister(PriamInstance instance) {
*/
public PriamInstance transform(Item item) {
PriamInstance ins = new PriamInstance();
- Iterator attrs = item.getAttributes().iterator();
- while (attrs.hasNext()) {
- Attribute att = attrs.next();
- if (att.getName().equals(Attributes.INSTANCE_ID))
- ins.setInstanceId(att.getValue());
- else if (att.getName().equals(Attributes.TOKEN))
- ins.setToken(att.getValue());
- else if (att.getName().equals(Attributes.APP_ID))
- ins.setApp(att.getValue());
+ for (Attribute att : item.getAttributes()) {
+ if (att.getName().equals(Attributes.INSTANCE_ID)) ins.setInstanceId(att.getValue());
+ else if (att.getName().equals(Attributes.TOKEN)) ins.setToken(att.getValue());
+ else if (att.getName().equals(Attributes.APP_ID)) ins.setApp(att.getValue());
else if (att.getName().equals(Attributes.ID))
ins.setId(Integer.parseInt(att.getValue()));
- else if (att.getName().equals(Attributes.AVAILABILITY_ZONE))
- ins.setRac(att.getValue());
- else if (att.getName().equals(Attributes.ELASTIC_IP))
- ins.setHostIP(att.getValue());
- else if (att.getName().equals(Attributes.HOSTNAME))
- ins.setHost(att.getValue());
- else if (att.getName().equals(Attributes.LOCATION))
- ins.setDC(att.getValue());
+ else if (att.getName().equals(Attributes.AVAILABILITY_ZONE)) ins.setRac(att.getValue());
+ else if (att.getName().equals(Attributes.ELASTIC_IP)) ins.setHostIP(att.getValue());
+ else if (att.getName().equals(Attributes.HOSTNAME)) ins.setHost(att.getValue());
+ else if (att.getName().equals(Attributes.LOCATION)) ins.setDC(att.getValue());
else if (att.getName().equals(Attributes.UPDATE_TS))
ins.setUpdatetime(Long.parseLong(att.getValue()));
}
@@ -205,7 +223,10 @@ private String getKey(PriamInstance instance) {
}
private AmazonSimpleDB getSimpleDBClient() {
- //Create per request
- return AmazonSimpleDBClient.builder().withCredentials(provider.getAwsCredentialProvider()).withRegion(configuration.getSDBInstanceIdentityRegion()).build();
+ // Create per request
+ return AmazonSimpleDBClient.builder()
+ .withCredentials(provider.getAwsCredentialProvider())
+ .withRegion(configuration.getSDBInstanceIdentityRegion())
+ .build();
}
}
diff --git a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java
index a10cd5878..df80c0bb5 100644
--- a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java
+++ b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java
@@ -17,41 +17,40 @@
package com.netflix.priam.aws;
import com.amazonaws.AmazonServiceException;
+import com.google.common.collect.ImmutableSet;
import com.google.inject.Inject;
import com.google.inject.Singleton;
-import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.identity.IPriamInstanceFactory;
import com.netflix.priam.identity.PriamInstance;
+import com.netflix.priam.identity.config.InstanceInfo;
+import java.util.*;
+import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
-
/**
- * SimpleDB based instance factory. Requires 'InstanceIdentity' domain to be
- * created ahead
+ * SimpleDB-based instance factory. Requires the 'InstanceIdentity' domain to be created ahead of time.
*/
@Singleton
-public class SDBInstanceFactory implements IPriamInstanceFactory {
+public class SDBInstanceFactory implements IPriamInstanceFactory {
private static final Logger logger = LoggerFactory.getLogger(SDBInstanceFactory.class);
- private final IConfiguration config;
private final SDBInstanceData dao;
+ private final InstanceInfo instanceInfo;
@Inject
- public SDBInstanceFactory(IConfiguration config, SDBInstanceData dao) {
- this.config = config;
+ public SDBInstanceFactory(SDBInstanceData dao, InstanceInfo instanceInfo) {
this.dao = dao;
+ this.instanceInfo = instanceInfo;
}
@Override
- public List getAllIds(String appName) {
- List return_ = new ArrayList();
- for (PriamInstance instance : dao.getAllIds(appName)) {
- return_.add(instance);
- }
- sort(return_);
- return return_;
+ public ImmutableSet getAllIds(String appName) {
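+        // Return an immutable, id-ordered snapshot of every instance registered for this app.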
+ return ImmutableSet.copyOf(
+ dao.getAllIds(appName)
+ .stream()
+ .sorted((Comparator.comparingInt(PriamInstance::getId)))
+ .collect(Collectors.toList()));
}
@Override
@@ -60,18 +59,29 @@ public PriamInstance getInstance(String appName, String dc, int id) {
}
@Override
- public PriamInstance create(String app, int id, String instanceID, String hostname, String ip, String rac, Map volumes, String token) {
+ public PriamInstance create(
+ String app,
+ int id,
+ String instanceID,
+ String hostname,
+ String ip,
+ String rac,
+ Map volumes,
+ String token) {
try {
- PriamInstance ins = makePriamInstance(app, id, instanceID, hostname, ip, rac, volumes, token);
+ PriamInstance ins =
+ makePriamInstance(app, id, instanceID, hostname, ip, rac, volumes, token);
// remove old data node which are dead.
if (app.endsWith("-dead")) {
try {
- PriamInstance oldData = dao.getInstance(app, config.getDC(), id);
+ PriamInstance oldData = dao.getInstance(app, instanceInfo.getRegion(), id);
// clean up a very old data...
- if (null != oldData && oldData.getUpdatetime() < (System.currentTimeMillis() - (3 * 60 * 1000)))
+ if (null != oldData
+ && oldData.getUpdatetime()
+ < (System.currentTimeMillis() - (3 * 60 * 1000)))
dao.deregisterInstance(oldData);
} catch (Exception ex) {
- //Do nothing
+ // Do nothing
logger.error(ex.getMessage(), ex);
}
}
@@ -93,36 +103,24 @@ public void delete(PriamInstance inst) {
}
@Override
- public void update(PriamInstance inst) {
+ public void update(PriamInstance orig, PriamInstance inst) {
try {
- dao.createInstance(inst);
+ dao.updateInstance(orig, inst);
} catch (AmazonServiceException e) {
throw new RuntimeException("Unable to update/create priam instance", e);
}
}
- @Override
- public void sort(List return_) {
- Comparator super PriamInstance> comparator = new Comparator() {
-
- @Override
- public int compare(PriamInstance o1, PriamInstance o2) {
-
- Integer c1 = o1.getId();
- Integer c2 = o2.getId();
- return c1.compareTo(c2);
- }
- };
- Collections.sort(return_, comparator);
- }
-
- @Override
- public void attachVolumes(PriamInstance instance, String mountPath, String device) {
- // TODO Auto-generated method stub
- }
-
- private PriamInstance makePriamInstance(String app, int id, String instanceID, String hostname, String ip, String rac, Map volumes, String token) {
- Map v = (volumes == null) ? new HashMap() : volumes;
+ private PriamInstance makePriamInstance(
+ String app,
+ int id,
+ String instanceID,
+ String hostname,
+ String ip,
+ String rac,
+ Map volumes,
+ String token) {
+ Map v = (volumes == null) ? new HashMap<>() : volumes;
PriamInstance ins = new PriamInstance();
ins.setApp(app);
ins.setRac(rac);
@@ -130,7 +128,7 @@ private PriamInstance makePriamInstance(String app, int id, String instanceID, S
ins.setHostIP(ip);
ins.setId(id);
ins.setInstanceId(instanceID);
- ins.setDC(config.getDC());
+ ins.setDC(instanceInfo.getRegion());
ins.setToken(token);
ins.setVolumes(v);
return ins;
diff --git a/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java b/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java
index 41034a5f2..56ca053ba 100644
--- a/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java
+++ b/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java
@@ -19,21 +19,18 @@
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
-import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.backup.IBackupFileSystem;
+import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.scheduler.SimpleTimer;
import com.netflix.priam.scheduler.Task;
import com.netflix.priam.scheduler.TaskTimer;
import com.netflix.priam.utils.RetryableCallable;
-/**
- * Updates the cleanup policy for the bucket
- *
- */
+/** Updates the cleanup policy for the bucket */
@Singleton
public class UpdateCleanupPolicy extends Task {
public static final String JOBNAME = "UpdateCleanupPolicy";
- private IBackupFileSystem fs;
+ private final IBackupFileSystem fs;
@Inject
public UpdateCleanupPolicy(IConfiguration config, @Named("backup") IBackupFileSystem fs) {
@@ -61,5 +58,4 @@ public String getName() {
public static TaskTimer getTimer() {
return new SimpleTimer(JOBNAME);
}
-
}
diff --git a/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java b/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java
deleted file mode 100644
index 21c407223..000000000
--- a/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright 2013 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package com.netflix.priam.aws;
-
-import com.google.common.collect.Lists;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.identity.IMembership;
-import com.netflix.priam.identity.IPriamInstanceFactory;
-import com.netflix.priam.identity.InstanceIdentity;
-import com.netflix.priam.identity.PriamInstance;
-import com.netflix.priam.scheduler.SimpleTimer;
-import com.netflix.priam.scheduler.Task;
-import com.netflix.priam.scheduler.TaskTimer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-
-/**
- * this class will associate an Public IP's with a new instance so they can talk
- * across the regions.
- *
- * Requirement: 1) Nodes in the same region needs to be able to talk to each
- * other. 2) Nodes in other regions needs to be able to talk to t`he others in
- * the other region.
- *
- * Assumption: 1) IPriamInstanceFactory will provide the membership... and will
- * be visible across the regions 2) IMembership amazon or any other
- * implementation which can tell if the instance is part of the group (ASG in
- * amazons case).
- *
- */
-@Singleton
-public class UpdateSecuritySettings extends Task {
- private static final Logger logger = LoggerFactory.getLogger(UpdateSecuritySettings.class);
- public static final String JOBNAME = "Update_SG";
- public static boolean firstTimeUpdated = false;
-
- private static final Random ran = new Random();
- private final IMembership membership;
- private final IPriamInstanceFactory factory;
-
- @Inject
- //Note: do not parameterized the generic type variable to an implementation as it confuses Guice in the binding.
- public UpdateSecuritySettings(IConfiguration config, IMembership membership, IPriamInstanceFactory factory) {
- super(config);
- this.membership = membership;
- this.factory = factory;
- }
-
- /**
- * Seeds nodes execute this at the specifed interval.
- * Other nodes run only on startup.
- * Seeds in cassandra are the first node in each Availablity Zone.
- */
- @Override
- public void execute() {
- // if seed dont execute.
- int port = config.getSSLStoragePort();
- List acls = membership.listACL(port, port);
- List instances = factory.getAllIds(config.getAppName());
-
- // iterate to add...
- Set add = new HashSet();
- List allInstances = factory.getAllIds(config.getAppName());
- for (PriamInstance instance : allInstances) {
- String range = instance.getHostIP() + "/32";
- if (!acls.contains(range))
- add.add(range);
- }
- if (add.size() > 0) {
- membership.addACL(add, port, port);
- firstTimeUpdated = true;
- }
-
- // just iterate to generate ranges.
- List currentRanges = Lists.newArrayList();
- for (PriamInstance instance : instances) {
- String range = instance.getHostIP() + "/32";
- currentRanges.add(range);
- }
-
- // iterate to remove...
- List remove = Lists.newArrayList();
- for (String acl : acls)
- if (!currentRanges.contains(acl)) // if not found then remove....
- remove.add(acl);
- if (remove.size() > 0) {
- membership.removeACL(remove, port, port);
- firstTimeUpdated = true;
- }
- }
-
- public static TaskTimer getTimer(InstanceIdentity id) {
- SimpleTimer return_;
- if (id.isSeed()) {
- logger.info("Seed node. Instance id: {}"
- + ", host ip: {}"
- + ", host name: {}",
- id.getInstance().getInstanceId(), id.getInstance().getHostIP(), id.getInstance().getHostName());
- return_ = new SimpleTimer(JOBNAME, 120 * 1000 + ran.nextInt(120 * 1000));
- } else
- return_ = new SimpleTimer(JOBNAME);
- return return_;
- }
-
- @Override
- public String getName() {
- return JOBNAME;
- }
-}
diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java
index f19805256..c01c92442 100644
--- a/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java
+++ b/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
@@ -20,20 +18,21 @@
import com.google.inject.Inject;
import com.netflix.priam.config.IConfiguration;
import com.netflix.priam.cred.ICredential;
-import com.netflix.priam.identity.InstanceEnvIdentity;
+import com.netflix.priam.identity.config.InstanceInfo;
public class EC2RoleAssumptionCredential implements ICredential {
private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "AwsRoleAssumptionSession";
- private ICredential cred;
- private IConfiguration config;
- private InstanceEnvIdentity insEnvIdentity;
+ private final ICredential cred;
+ private final IConfiguration config;
+ private final InstanceInfo instanceInfo;
private AWSCredentialsProvider stsSessionCredentialsProvider;
@Inject
- public EC2RoleAssumptionCredential(ICredential cred, IConfiguration config, InstanceEnvIdentity insEnvIdentity) {
+ public EC2RoleAssumptionCredential(
+ ICredential cred, IConfiguration config, InstanceInfo instanceInfo) {
this.cred = cred;
this.config = config;
- this.insEnvIdentity = insEnvIdentity;
+ this.instanceInfo = instanceInfo;
}
@Override
@@ -42,40 +41,48 @@ public AWSCredentialsProvider getAwsCredentialProvider() {
synchronized (this) {
if (this.stsSessionCredentialsProvider == null) {
- String roleArn = null;
+ String roleArn;
/**
- * Create the assumed IAM role based on the environment.
- * For example, if the current environment is VPC,
- * then the assumed role is for EC2 classic, and vice versa.
+ * Create the assumed IAM role based on the environment. For example, if the
+ * current environment is VPC, then the assumed role is for EC2 classic, and
+ * vice versa.
*/
- if (this.insEnvIdentity.isClassic()) {
- roleArn = this.config.getClassicEC2RoleAssumptionArn(); // Env is EC2 classic --> IAM assumed role for VPC created
+ if (instanceInfo.getInstanceEnvironment()
+ == InstanceInfo.InstanceEnvironment.CLASSIC) {
+ roleArn = this.config.getClassicEC2RoleAssumptionArn();
+ // Env is EC2 classic --> IAM assumed role for VPC created
} else {
- roleArn = this.config.getVpcEC2RoleAssumptionArn(); // Env is VPC --> IAM assumed role for EC2 classic created
+ roleArn = this.config.getVpcEC2RoleAssumptionArn();
+ // Env is VPC --> IAM assumed role for EC2 classic created.
}
//
if (roleArn == null || roleArn.isEmpty())
- throw new NullPointerException("Role ARN is null or empty probably due to missing config entry");
-
+ throw new NullPointerException(
+ "Role ARN is null or empty probably due to missing config entry");
/**
- * Get handle to an implementation that uses AWS Security Token Service (STS) to create temporary,
- * short-lived session with explicit refresh for session/token expiration.
+ * Get handle to an implementation that uses AWS Security Token Service (STS) to
+ * create temporary, short-lived session with explicit refresh for session/token
+ * expiration.
*/
try {
- this.stsSessionCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(this.cred.getAwsCredentialProvider(), roleArn, AWS_ROLE_ASSUMPTION_SESSION_NAME);
+ this.stsSessionCredentialsProvider =
+ new STSAssumeRoleSessionCredentialsProvider(
+ this.cred.getAwsCredentialProvider(),
+ roleArn,
+ AWS_ROLE_ASSUMPTION_SESSION_NAME);
} catch (Exception ex) {
- throw new IllegalStateException("Exception in getting handle to AWS Security Token Service (STS). Msg: " + ex.getLocalizedMessage(), ex);
+ throw new IllegalStateException(
+ "Exception in getting handle to AWS Security Token Service (STS). Msg: "
+ + ex.getLocalizedMessage(),
+ ex);
}
-
}
-
}
}
return this.stsSessionCredentialsProvider;
-
}
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java b/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java
index 266cf1eb3..86ede4744 100755
--- a/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java
+++ b/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java
index 09f172ef1..03b4bf939 100755
--- a/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java
+++ b/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
@@ -24,10 +22,10 @@
*/
public class S3InstanceCredential implements IS3Credential {
- private InstanceProfileCredentialsProvider credentialsProvider;
+ private final InstanceProfileCredentialsProvider credentialsProvider;
public S3InstanceCredential() {
- this.credentialsProvider = new InstanceProfileCredentialsProvider();
+ this.credentialsProvider = InstanceProfileCredentialsProvider.getInstance();
}
@Override
@@ -39,6 +37,4 @@ public AWSCredentials getCredentials() throws Exception {
public AWSCredentialsProvider getAwsCredentialProvider() {
return this.credentialsProvider;
}
-
-
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java
index 1296f7ea7..21befc17b 100755
--- a/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java
+++ b/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.aws.auth;
@@ -30,8 +28,8 @@ public class S3RoleAssumptionCredential implements IS3Credential {
private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "S3RoleAssumptionSession";
private static final Logger logger = LoggerFactory.getLogger(S3RoleAssumptionCredential.class);
- private ICredential cred;
- private IConfiguration config;
+ private final ICredential cred;
+ private final IConfiguration config;
private AWSCredentialsProvider stsSessionCredentialsProvider;
@Inject
@@ -68,27 +66,37 @@ public AWSCredentialsProvider getAwsCredentialProvider() {
synchronized (this) {
if (this.stsSessionCredentialsProvider == null) {
- final String roleArn = this.config.getAWSRoleAssumptionArn(); //IAM role created for bucket own by account "awsprodbackup"
+ final String roleArn = this.config.getAWSRoleAssumptionArn();
+                // IAM role created for bucket owned by account "awsprodbackup"
if (roleArn == null || roleArn.isEmpty()) {
- logger.warn("Role ARN is null or empty probably due to missing config entry. Falling back to instance level credentials");
+ logger.warn(
+ "Role ARN is null or empty probably due to missing config entry. Falling back to instance level credentials");
this.stsSessionCredentialsProvider = this.cred.getAwsCredentialProvider();
- //throw new NullPointerException("Role ARN is null or empty probably due to missing config entry");
+ // throw new NullPointerException("Role ARN is null or empty probably due to
+ // missing config entry");
} else {
- //== Get handle to an implementation that uses AWS Security Token Service (STS) to create temporary, short-lived session with explicit refresh for session/token expiration.
+ // Get handle to an implementation that uses AWS Security Token Service
+ // (STS) to create temporary, short-lived session with explicit refresh for
+ // session/token expiration.
try {
- this.stsSessionCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(this.cred.getAwsCredentialProvider(), roleArn, AWS_ROLE_ASSUMPTION_SESSION_NAME);
+ this.stsSessionCredentialsProvider =
+ new STSAssumeRoleSessionCredentialsProvider(
+ this.cred.getAwsCredentialProvider(),
+ roleArn,
+ AWS_ROLE_ASSUMPTION_SESSION_NAME);
} catch (Exception ex) {
- throw new IllegalStateException("Exception in getting handle to AWS Security Token Service (STS). Msg: " + ex.getLocalizedMessage(), ex);
+ throw new IllegalStateException(
+ "Exception in getting handle to AWS Security Token Service (STS). Msg: "
+ + ex.getLocalizedMessage(),
+ ex);
}
}
-
}
}
}
return this.stsSessionCredentialsProvider;
}
-
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java b/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java
index 611aa6ab8..bd8cab2cc 100644
--- a/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java
+++ b/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java
@@ -16,181 +16,125 @@
*/
package com.netflix.priam.backup;
-import com.google.common.collect.Lists;
import com.google.inject.Inject;
-import com.google.inject.Provider;
import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
import com.netflix.priam.scheduler.Task;
-import com.netflix.priam.utils.RetryableCallable;
import com.netflix.priam.utils.SystemUtils;
+import java.io.File;
+import java.io.FileFilter;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.File;
-import java.util.List;
-
-/**
- * Abstract Backup class for uploading files to backup location
- */
-public abstract class AbstractBackup extends Task{
+/** Abstract Backup class for uploading files to backup location */
+public abstract class AbstractBackup extends Task {
private static final Logger logger = LoggerFactory.getLogger(AbstractBackup.class);
- public static final String INCREMENTAL_BACKUP_FOLDER = "backups";
+ static final String INCREMENTAL_BACKUP_FOLDER = "backups";
public static final String SNAPSHOT_FOLDER = "snapshots";
- protected final Provider pathFactory;
-
- protected IBackupFileSystem fs;
-
@Inject
- public AbstractBackup(IConfiguration config, IFileSystemContext backupFileSystemCtx,
- Provider pathFactory) {
+ public AbstractBackup(IConfiguration config) {
super(config);
- this.pathFactory = pathFactory;
- this.fs = backupFileSystemCtx.getFileStrategy(config);
- }
-
- /**
- * A means to override the type of backup strategy chosen via BackupFileSystemContext
- */
- protected void setFileSystem(IBackupFileSystem fs) {
- this.fs = fs;
- }
-
- /**
- * Upload files in the specified dir. Does not delete the file in case of
- * error. The files are uploaded serially.
- *
- * @param parent Parent dir
- * @param type Type of file (META, SST, SNAP etc)
- * @return List of files that are successfully uploaded as part of backup
- * @throws Exception when there is failure in uploading files.
- */
- List upload(File parent, final BackupFileType type) throws Exception {
- final List bps = Lists.newArrayList();
- for (final File file : parent.listFiles()) {
- //== decorate file with metadata
- final AbstractBackupPath bp = pathFactory.get();
- bp.parseLocal(file, type);
-
- try {
- logger.info("About to upload file {} for backup", file.getCanonicalFile());
-
- AbstractBackupPath abp = new RetryableCallable(3, RetryableCallable.DEFAULT_WAIT_TIME) {
- public AbstractBackupPath retriableCall() throws Exception {
- upload(bp);
- file.delete();
- return bp;
- }
- }.call();
-
- if (abp != null)
- bps.add(abp);
-
- addToRemotePath(abp.getRemotePath());
- } catch (Exception e) {
- //Throw exception to the caller. This will allow them to take appropriate decision.
- logger.error("Failed to upload local file {} within CF {}.", file.getCanonicalFile(), parent.getAbsolutePath(), e);
- throw e;
- }
- }
- return bps;
}
-
- /**
- * Upload specified file (RandomAccessFile) with retries
- *
- * @param bp backup path to be uploaded.
- */
- protected void upload(final AbstractBackupPath bp) throws Exception {
- new RetryableCallable() {
- @Override
- public Void retriableCall() throws Exception {
- java.io.InputStream is = null;
- try {
- is = bp.localReader();
- if (is == null) {
- throw new NullPointerException("Unable to get handle on file: " + bp.fileName);
- }
- fs.upload(bp, is);
- bp.setCompressedFileSize(fs.getBytesUploaded());
- return null;
- } catch (Exception e) {
- logger.error("Exception uploading local file {}, releasing handle, and will retry.", bp.backupFile.getCanonicalFile());
- if (is != null) {
- is.close();
- }
- throw e;
- }
-
- }
- }.call();
- }
-
- protected final void initiateBackup(String monitoringFolder, BackupRestoreUtil backupRestoreUtil) throws Exception {
+ protected final void initiateBackup(
+ String monitoringFolder, BackupRestoreUtil backupRestoreUtil) throws Exception {
File dataDir = new File(config.getDataFileLocation());
- if (!dataDir.exists()) {
- throw new IllegalArgumentException("The configured 'data file location' does not exist: "
- + config.getDataFileLocation());
+ if (!dataDir.exists() || !dataDir.isDirectory()) {
+ throw new IllegalArgumentException(
+ "The configured 'data file location' does not exist or is not a directory: "
+ + config.getDataFileLocation());
}
logger.debug("Scanning for backup in: {}", dataDir.getAbsolutePath());
- for (File keyspaceDir : dataDir.listFiles()) {
- if (keyspaceDir.isFile())
- continue;
+ File[] keyspaceDirectories = dataDir.listFiles();
+ if (keyspaceDirectories == null) return;
+
+ for (File keyspaceDir : keyspaceDirectories) {
+ if (keyspaceDir.isFile()) continue;
logger.debug("Entering {} keyspace..", keyspaceDir.getName());
+ File[] columnFamilyDirectories = keyspaceDir.listFiles();
+ if (columnFamilyDirectories == null) continue;
- for (File columnFamilyDir : keyspaceDir.listFiles()) {
+ for (File columnFamilyDir : columnFamilyDirectories) {
File backupDir = new File(columnFamilyDir, monitoringFolder);
-
- if (!isValidBackupDir(keyspaceDir, backupDir)) {
- continue;
- }
-
- String columnFamilyName = columnFamilyDir.getName().split("-")[0];
- if (backupRestoreUtil.isFiltered(keyspaceDir.getName(), columnFamilyDir.getName())) {
- //Clean the backup/snapshot directory else files will keep getting accumulated.
- SystemUtils.cleanupDir(backupDir.getAbsolutePath(), null);
- continue;
+ if (isAReadableDirectory(backupDir)) {
+ String columnFamilyName = getColumnFamily(backupDir);
+ if (backupRestoreUtil.isFiltered(keyspaceDir.getName(), columnFamilyName)) {
+ // Clean the backup/snapshot directory else files will keep getting
+ // accumulated.
+ SystemUtils.cleanupDir(backupDir.getAbsolutePath(), null);
+ } else {
+ processColumnFamily(backupDir);
+ }
}
+ } // end processing all CFs for keyspace
+ } // end processing keyspaces under the C* data dir
+ }
- processColumnFamily(keyspaceDir.getName(), columnFamilyName, backupDir);
-
- } //end processing all CFs for keyspace
- } //end processing keyspaces under the C* data dir
+ protected String getColumnFamily(File backupDir) {
+ return backupDir.getParentFile().getName().split("-")[0];
+ }
+ protected String getKeyspace(File backupDir) {
+ return backupDir.getParentFile().getParentFile().getName();
}
/**
* Process the columnfamily in a given snapshot/backup directory.
*
- * @param keyspace Name of the keyspace
- * @param columnFamily Name of the columnfamily
- * @param backupDir Location of the backup/snapshot directory in that columnfamily.
+ * @param backupDir Location of the backup/snapshot directory in that columnfamily.
* @throws Exception throws exception if there is any error in processing the directory.
*/
- protected abstract void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception;
-
+ protected abstract void processColumnFamily(File backupDir) throws Exception;
/**
- * Filters unwanted keyspaces
+ * Get all the backup directories for Cassandra.
+ *
+ * @param config to get the location of the data folder.
+ * @param monitoringFolder folder where cassandra backups are configured.
+ * @return Set of the path(s) containing the backup folder for each columnfamily.
+ * @throws Exception in case of IOException.
*/
- private boolean isValidBackupDir(File keyspaceDir, File backupDir) {
- if (!backupDir.isDirectory() && !backupDir.exists())
- return false;
- String keyspaceName = keyspaceDir.getName();
- if (BackupRestoreUtil.FILTER_KEYSPACE.contains(keyspaceName)) {
- logger.debug("{} is not consider a valid keyspace backup directory, will be bypass.", keyspaceName);
- return false;
- }
+ public static Set<Path> getBackupDirectories(IConfiguration config, String monitoringFolder)
+ throws Exception {
+ HashSet<Path> backupPaths = new HashSet<>();
+ if (config.getDataFileLocation() == null) return backupPaths;
+ Path dataPath = Paths.get(config.getDataFileLocation());
+ if (Files.exists(dataPath) && Files.isDirectory(dataPath))
+ try (DirectoryStream<Path> directoryStream =
+ Files.newDirectoryStream(dataPath, path -> Files.isDirectory(path))) {
+ for (Path keyspaceDirPath : directoryStream) {
+ try (DirectoryStream<Path> keyspaceStream =
+ Files.newDirectoryStream(
+ keyspaceDirPath, path -> Files.isDirectory(path))) {
+ for (Path columnfamilyDirPath : keyspaceStream) {
+ Path backupDirPath =
+ Paths.get(columnfamilyDirPath.toString(), monitoringFolder);
+ if (Files.exists(backupDirPath) && Files.isDirectory(backupDirPath)) {
+ logger.debug("Backup folder: {}", backupDirPath);
+ backupPaths.add(backupDirPath);
+ }
+ }
+ }
+ }
+ }
+ return backupPaths;
+ }
- return true;
+ protected static File[] getSecondaryIndexDirectories(File backupDir) {
+ FileFilter filter = (file) -> file.getName().startsWith(".") && isAReadableDirectory(file);
+ return Optional.ofNullable(backupDir.listFiles(filter)).orElse(new File[] {});
}
- /**
- * Adds Remote path to the list of Remote Paths
- */
- protected abstract void addToRemotePath(String remotePath);
+ protected static boolean isAReadableDirectory(File dir) {
+ return dir.exists() && dir.isDirectory() && dir.canRead();
+ }
}
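
For reference, a minimal, illustrative sketch of driving the new static getBackupDirectories helper; the IConfiguration instance and the BackupDirScan class name are assumptions for the example, and the "backups" literal mirrors the incremental folder constant:

    import com.netflix.priam.backup.AbstractBackup;
    import com.netflix.priam.config.IConfiguration;
    import java.nio.file.Path;
    import java.util.Set;

    class BackupDirScan {
        // 'config' is assumed to be supplied by the caller, e.g. via Guice injection.
        static void printBackupDirs(IConfiguration config) throws Exception {
            // One entry per <data_dir>/<keyspace>/<columnfamily>/backups directory.
            Set<Path> dirs = AbstractBackup.getBackupDirectories(config, "backups");
            dirs.forEach(System.out::println);
        }
    }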
diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java b/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java
index 28b049e51..e5c9a69fa 100644
--- a/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java
+++ b/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java
@@ -16,40 +16,66 @@
*/
package com.netflix.priam.backup;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
import com.google.inject.ImplementedBy;
+import com.netflix.priam.aws.RemoteBackupPath;
+import com.netflix.priam.compress.CompressionType;
+import com.netflix.priam.config.BackupsToCompress;
import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.aws.S3BackupPath;
+import com.netflix.priam.cryptography.CryptographyAlgorithm;
import com.netflix.priam.identity.InstanceIdentity;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.joda.time.DateTime;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import com.netflix.priam.utils.DateUtil;
import java.io.File;
import java.io.IOException;
-import java.io.InputStream;
-import java.io.RandomAccessFile;
-import java.text.ParseException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.time.Instant;
import java.util.Date;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
-@ImplementedBy(S3BackupPath.class)
+@ImplementedBy(RemoteBackupPath.class)
public abstract class AbstractBackupPath implements Comparable<AbstractBackupPath> {
- private static final Logger logger = LoggerFactory.getLogger(AbstractBackupPath.class);
- private static final String FMT = "yyyyMMddHHmm";
- private static final DateTimeFormatter DATE_FORMAT = DateTimeFormat.forPattern(FMT);
public static final char PATH_SEP = File.separatorChar;
+ public static final Joiner PATH_JOINER = Joiner.on(PATH_SEP);
+ private static final ImmutableMap<BackupFolder, Integer> FOLDER_POSITIONS =
+ ImmutableMap.of(BackupFolder.BACKUPS, 3, BackupFolder.SNAPSHOTS, 4);
public enum BackupFileType {
- SNAP, SST, CL, META, META_V2;
+ CL,
+ META,
+ META_V2,
+ SECONDARY_INDEX_V2,
+ SNAP,
+ SNAPSHOT_VERIFIED,
+ SST,
+ SST_V2;
+
+ private static ImmutableSet<BackupFileType> DATA_FILE_TYPES =
+ ImmutableSet.of(SECONDARY_INDEX_V2, SNAP, SST, SST_V2);
+
+ private static ImmutableSet<BackupFileType> V2_FILE_TYPES =
+ ImmutableSet.of(SECONDARY_INDEX_V2, SST_V2, META_V2);
+
+ public static boolean isDataFile(BackupFileType type) {
+ return DATA_FILE_TYPES.contains(type);
+ }
- public static boolean isDataFile(BackupFileType type){
- if (type != BackupFileType.META && type != BackupFileType.META_V2 && type != BackupFileType.CL)
- return true;
+ public static boolean isV2(BackupFileType type) {
+ return V2_FILE_TYPES.contains(type);
+ }
- return false;
+ public static BackupFileType fromString(String s) throws BackupRestoreException {
+ try {
+ return BackupFileType.valueOf(s);
+ } catch (IllegalArgumentException e) {
+ throw new BackupRestoreException(String.format("Unknown BackupFileType %s", s));
+ }
}
}
@@ -61,100 +87,111 @@ public static boolean isDataFile(BackupFileType type){
protected String baseDir;
protected String token;
protected String region;
+ protected String indexDir;
protected Date time;
- protected long size; //uncompressed file size
- protected long compressedFileSize = 0;
- protected boolean isCassandra1_0;
-
- protected final InstanceIdentity factory;
+ private long size; // uncompressed file size
+ private long compressedFileSize = 0;
+ protected final InstanceIdentity instanceIdentity;
protected final IConfiguration config;
protected File backupFile;
- protected Date uploadedTs;
-
- public AbstractBackupPath(IConfiguration config, InstanceIdentity factory) {
- this.factory = factory;
+ private Instant lastModified;
+ private Instant creationTime;
+ private Date uploadedTs;
+ private CompressionType compression;
+ private CryptographyAlgorithm encryption = CryptographyAlgorithm.PLAINTEXT;
+ private boolean isIncremental;
+
+ public AbstractBackupPath(IConfiguration config, InstanceIdentity instanceIdentity) {
+ this.instanceIdentity = instanceIdentity;
this.config = config;
+ this.compression =
+ config.getBackupsToCompress() == BackupsToCompress.NONE
+ ? CompressionType.NONE
+ : CompressionType.SNAPPY;
}
- public static String formatDate(Date d) {
- return new DateTime(d).toString(FMT);
- }
-
- public Date parseDate(String s) {
- return DATE_FORMAT.parseDateTime(s).toDate();
- }
-
- public InputStream localReader() throws IOException {
- assert backupFile != null;
- return new RafInputStream(new RandomAccessFile(backupFile, "r"));
- }
-
- public void parseLocal(File file, BackupFileType type) throws ParseException {
- // TODO cleanup.
+ public void parseLocal(File file, BackupFileType type) {
this.backupFile = file;
-
- String rpath = new File(config.getDataFileLocation()).toURI().relativize(file.toURI()).getPath();
- String[] elements = rpath.split("" + PATH_SEP);
- this.clusterName = config.getAppName();
this.baseDir = config.getBackupLocation();
- this.region = config.getDC();
- this.token = factory.getInstance().getToken();
+ this.clusterName = config.getAppName();
+ this.fileName = file.getName();
+ BasicFileAttributes fileAttributes;
+ try {
+ fileAttributes = Files.readAttributes(file.toPath(), BasicFileAttributes.class);
+ this.lastModified = fileAttributes.lastModifiedTime().toInstant();
+ this.creationTime = fileAttributes.creationTime().toInstant();
+ this.size = fileAttributes.size();
+ } catch (IOException e) {
+ this.lastModified = Instant.ofEpochMilli(0L);
+ this.creationTime = Instant.ofEpochMilli(0L);
+ this.size = 0L;
+ }
+ this.region = instanceIdentity.getInstanceInfo().getRegion();
+ this.token = instanceIdentity.getInstance().getToken();
this.type = type;
+
+ String rpath =
+ new File(config.getDataFileLocation()).toURI().relativize(file.toURI()).getPath();
+ String[] parts = rpath.split("" + PATH_SEP);
if (BackupFileType.isDataFile(type)) {
- this.keyspace = elements[0];
- if (!isCassandra1_0)
- this.columnFamily = elements[1];
+ this.keyspace = parts[0];
+ this.columnFamily = parts[1];
+ }
+ if (BackupFileType.isDataFile(type)) {
+ Optional<BackupFolder> folder = BackupFolder.fromName(parts[2]);
+ this.isIncremental = folder.filter(BackupFolder.BACKUPS::equals).isPresent();
+ if (type == BackupFileType.SECONDARY_INDEX_V2) {
+ Integer index = folder.map(FOLDER_POSITIONS::get).orElse(null);
+ Preconditions.checkNotNull(index, "Unrecognized backup folder " + parts[2]);
+ this.indexDir = parts[index];
+ }
}
- if (type == BackupFileType.SNAP)
- time = parseDate(elements[3]);
- if (type == BackupFileType.SST || type == BackupFileType.CL)
- time = new Date(file.lastModified());
- this.fileName = file.getName();
- this.size = file.length();
- }
- /**
- * Given a date range, find a common string prefix Eg: 20120212, 20120213 =
- * 2012021
- */
- public String match(Date start, Date end) {
- String sString = formatDate(start);
- String eString = formatDate(end);
+ /*
+ 1. For old style snapshots, set this value to the time at which the backup was executed.
+ 2. This is to ensure that all the files from the snapshot are uploaded under single directory in remote file system.
+ 3. For META files we always override the time field via @link{Metadata#decorateMetaJson}
+ */
+ this.time =
+ type == BackupFileType.SNAP
+ ? DateUtil.getDate(parts[3])
+ : new Date(lastModified.toEpochMilli());
+ }
+
+ /** Given a date range, find a common string prefix Eg: 20120212, 20120213 = 2012021 */
+ protected String match(Date start, Date end) {
+ String sString = DateUtil.formatyyyyMMddHHmm(start); // formatDate(start);
+ String eString = DateUtil.formatyyyyMMddHHmm(end); // formatDate(end);
int diff = StringUtils.indexOfDifference(sString, eString);
- if (diff < 0)
- return sString;
+ if (diff < 0) return sString;
return sString.substring(0, diff);
}
- /**
- * Local restore file
- */
+ /** Local restore file */
public File newRestoreFile() {
- StringBuffer buff = new StringBuffer();
- if (type == BackupFileType.CL) {
- buff.append(config.getBackupCommitLogLocation()).append(PATH_SEP);
- } else {
-
- buff.append(config.getDataFileLocation()).append(PATH_SEP);
- if (type != BackupFileType.META && type != BackupFileType.META_V2) {
- if (isCassandra1_0)
- buff.append(keyspace).append(PATH_SEP);
- else
- buff.append(keyspace).append(PATH_SEP).append(columnFamily).append(PATH_SEP);
- }
+ File return_;
+ String dataDir = config.getDataFileLocation();
+ switch (type) {
+ case CL:
+ return_ = new File(PATH_JOINER.join(config.getBackupCommitLogLocation(), fileName));
+ break;
+ case SECONDARY_INDEX_V2:
+ String restoreFileName =
+ PATH_JOINER.join(dataDir, keyspace, columnFamily, indexDir, fileName);
+ return_ = new File(restoreFileName);
+ break;
+ case META:
+ case META_V2:
+ return_ = new File(PATH_JOINER.join(config.getDataFileLocation(), fileName));
+ break;
+ default:
+ return_ = new File(PATH_JOINER.join(dataDir, keyspace, columnFamily, fileName));
}
-
- buff.append(fileName);
-
- File return_ = new File(buff.toString());
File parent = new File(return_.getParent());
- if (!parent.exists())
- parent.mkdirs();
+ if (!parent.exists()) parent.mkdirs();
return return_;
}
-
-
@Override
public int compareTo(AbstractBackupPath o) {
return getRemotePath().compareTo(o.getRemotePath());
@@ -162,35 +199,27 @@ public int compareTo(AbstractBackupPath o) {
@Override
public boolean equals(Object obj) {
- if (!obj.getClass().equals(this.getClass()))
- return false;
- return getRemotePath().equals(((AbstractBackupPath) obj).getRemotePath());
+ return obj.getClass().equals(this.getClass())
+ && getRemotePath().equals(((AbstractBackupPath) obj).getRemotePath());
}
- /**
- * Get remote prefix for this path object
- */
+ /** Get remote prefix for this path object */
public abstract String getRemotePath();
- /**
- * Parses a fully constructed remote path
- */
+ /** Parses a fully constructed remote path */
public abstract void parseRemote(String remoteFilePath);
- /**
- * Parses paths with just token prefixes
- */
+ /** Parses paths with just token prefixes */
public abstract void parsePartialPrefix(String remoteFilePath);
/**
- * Provides a common prefix that matches all objects that fall between
- * the start and end time
+ * Provides a common prefix that matches all objects that fall between the start and end time
*/
public abstract String remotePrefix(Date start, Date end, String location);
- /**
- * Provides the cluster prefix
- */
+ public abstract Path remoteV2Prefix(Path location, BackupFileType fileType);
+
+ /** Provides the cluster prefix */
public abstract String clusterPrefix(String location);
public BackupFileType getType() {
@@ -217,10 +246,6 @@ public String getFileName() {
return fileName;
}
- public String getBaseDir() {
- return baseDir;
- }
-
public String getToken() {
return token;
}
@@ -233,6 +258,10 @@ public Date getTime() {
return time;
}
+ public void setTime(Date time) {
+ this.time = time;
+ }
+
/*
@return original, uncompressed file size
*/
@@ -256,20 +285,12 @@ public File getBackupFile() {
return backupFile;
}
- public boolean isCassandra1_0() {
- return isCassandra1_0;
- }
-
- public void setCassandra1_0(boolean isCassandra1_0) {
- this.isCassandra1_0 = isCassandra1_0;
- }
-
public void setFileName(String fileName) {
this.fileName = fileName;
}
public InstanceIdentity getInstanceIdentity() {
- return this.factory;
+ return this.instanceIdentity;
}
public void setUploadedTs(Date uploadedTs) {
@@ -280,27 +301,41 @@ public Date getUploadedTs() {
return this.uploadedTs;
}
- public static class RafInputStream extends InputStream {
- private RandomAccessFile raf;
+ public Instant getLastModified() {
+ return lastModified;
+ }
- public RafInputStream(RandomAccessFile raf) {
- this.raf = raf;
- }
+ public void setLastModified(Instant instant) {
+ this.lastModified = instant;
+ }
- @Override
- public synchronized int read(byte[] bytes, int off, int len) throws IOException {
- return raf.read(bytes, off, len);
- }
+ public Instant getCreationTime() {
+ return creationTime;
+ }
- @Override
- public void close() {
- IOUtils.closeQuietly(raf);
- }
+ @VisibleForTesting
+ public void setCreationTime(Instant instant) {
+ this.creationTime = instant;
+ }
- @Override
- public int read() throws IOException {
- return 0;
- }
+ public CompressionType getCompression() {
+ return compression;
+ }
+
+ public void setCompression(CompressionType compressionType) {
+ this.compression = compressionType;
+ }
+
+ public CryptographyAlgorithm getEncryption() {
+ return encryption;
+ }
+
+ public void setEncryption(String encryption) {
+ this.encryption = CryptographyAlgorithm.valueOf(encryption);
+ }
+
+ public boolean isIncremental() {
+ return isIncremental;
}
@Override
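
The remote listing prefix produced by match() is just the longest common leading substring of the two formatted dates; a small self-contained sketch (the PrefixMatchDemo class is hypothetical) reproduces the example in the javadoc:

    import org.apache.commons.lang3.StringUtils;

    class PrefixMatchDemo {
        // Mirrors AbstractBackupPath#match: the shared leading characters of the two
        // yyyyMMddHHmm strings become the object-listing prefix.
        static String commonPrefix(String start, String end) {
            int diff = StringUtils.indexOfDifference(start, end);
            return diff < 0 ? start : start.substring(0, diff);
        }

        public static void main(String[] args) {
            System.out.println(commonPrefix("201202120000", "201202130000")); // prints 2012021
        }
    }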
diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java b/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java
new file mode 100644
index 000000000..a39190ab5
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.priam.backup;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.netflix.priam.backup.AbstractBackupPath.BackupFileType;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.merics.BackupMetrics;
+import com.netflix.priam.notification.BackupEvent;
+import com.netflix.priam.notification.BackupNotificationMgr;
+import com.netflix.priam.notification.EventGenerator;
+import com.netflix.priam.notification.EventObserver;
+import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor;
+import com.netflix.priam.utils.BoundedExponentialRetryCallable;
+import com.netflix.spectator.api.patterns.PolledMeter;
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.*;
+import org.apache.commons.collections4.iterators.FilterIterator;
+import org.apache.commons.collections4.iterators.TransformIterator;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is responsible for managing parallelism and orchestrating the upload and download, but
+ * the subclasses actually implement the details of uploading a file.
+ *
+ * Created by aagrawal on 8/30/18.
+ */
+public abstract class AbstractFileSystem implements IBackupFileSystem, EventGenerator<BackupEvent> {
+ private static final Logger logger = LoggerFactory.getLogger(AbstractFileSystem.class);
+ protected final Provider<AbstractBackupPath> pathProvider;
+ private final CopyOnWriteArrayList<EventObserver<BackupEvent>> observers =
+ new CopyOnWriteArrayList<>();
+ private final IConfiguration configuration;
+ protected final BackupMetrics backupMetrics;
+ private final Set<Path> tasksQueued;
+ private final ListeningExecutorService fileUploadExecutor;
+ private final ThreadPoolExecutor fileDownloadExecutor;
+
+ // This is going to be a write-thru cache containing the most frequently used items from remote
+ // file system. This is to ensure that we don't make too many API calls to remote file system.
+ private final Cache<Path, Boolean> objectCache;
+
+ @Inject
+ public AbstractFileSystem(
+ IConfiguration configuration,
+ BackupMetrics backupMetrics,
+ BackupNotificationMgr backupNotificationMgr,
+ Provider<AbstractBackupPath> pathProvider) {
+ this.configuration = configuration;
+ this.backupMetrics = backupMetrics;
+ this.pathProvider = pathProvider;
+ // Add notifications.
+ this.addObserver(backupNotificationMgr);
+ this.objectCache =
+ CacheBuilder.newBuilder().maximumSize(configuration.getBackupQueueSize()).build();
+ tasksQueued = new ConcurrentHashMap<>().newKeySet();
+ /*
+ Note: We are using different queue for upload and download as with Backup V2.0 we might download all the meta
+ files for "sync" feature which might compete with backups for scheduling.
+ Also, we may want to have different TIMEOUT for each kind of operation (upload/download) based on our file system choices.
+ */
+ BlockingQueue<Runnable> uploadQueue =
+ new ArrayBlockingQueue<>(configuration.getBackupQueueSize());
+ PolledMeter.using(backupMetrics.getRegistry())
+ .withName(backupMetrics.uploadQueueSize)
+ .monitorSize(uploadQueue);
+ this.fileUploadExecutor =
+ MoreExecutors.listeningDecorator(
+ new BlockingSubmitThreadPoolExecutor(
+ configuration.getBackupThreads(),
+ uploadQueue,
+ configuration.getUploadTimeout()));
+
+ BlockingQueue<Runnable> downloadQueue =
+ new ArrayBlockingQueue<>(configuration.getDownloadQueueSize());
+ PolledMeter.using(backupMetrics.getRegistry())
+ .withName(backupMetrics.downloadQueueSize)
+ .monitorSize(downloadQueue);
+ this.fileDownloadExecutor =
+ new BlockingSubmitThreadPoolExecutor(
+ configuration.getRestoreThreads(),
+ downloadQueue,
+ configuration.getDownloadTimeout());
+ }
+
+ @Override
+ public Future<Path> asyncDownloadFile(final AbstractBackupPath path, final int retry)
+ throws RejectedExecutionException {
+ return fileDownloadExecutor.submit(
+ () -> {
+ downloadFile(path, "" /* suffix */, retry);
+ return Paths.get(path.getRemotePath());
+ });
+ }
+
+ @Override
+ public void downloadFile(final AbstractBackupPath path, String suffix, final int retry)
+ throws BackupRestoreException {
+ // TODO: Should we download the file if localPath already exists?
+ String remotePath = path.getRemotePath();
+ String localPath = path.newRestoreFile().getAbsolutePath() + suffix;
+ logger.info("Downloading file: {} to location: {}", path.getRemotePath(), localPath);
+ try {
+ new BoundedExponentialRetryCallable<Void>(500, 10000, retry) {
+ @Override
+ public Void retriableCall() throws Exception {
+ downloadFileImpl(path, suffix);
+ return null;
+ }
+ }.call();
+ // Note we only downloaded the bytes which are represented on file system (they are
+ // compressed and maybe encrypted).
+ // File size after decompression or decryption might be more/less.
+ backupMetrics.recordDownloadRate(getFileSize(remotePath));
+ backupMetrics.incrementValidDownloads();
+ logger.info("Successfully downloaded file: {} to location: {}", remotePath, localPath);
+ } catch (Exception e) {
+ backupMetrics.incrementInvalidDownloads();
+ logger.error("Error while downloading file: {} to location: {}", remotePath, localPath);
+ throw new BackupRestoreException(e.getMessage());
+ }
+ }
+
+ protected abstract void downloadFileImpl(final AbstractBackupPath path, String suffix)
+ throws BackupRestoreException;
+
+ @Override
+ public ListenableFuture<AbstractBackupPath> uploadAndDelete(
+ final AbstractBackupPath path, Instant target, boolean async)
+ throws RejectedExecutionException, BackupRestoreException {
+ if (async) {
+ return fileUploadExecutor.submit(
+ () -> uploadAndDeleteInternal(path, target, 10 /* retries */));
+ } else {
+ return Futures.immediateFuture(uploadAndDeleteInternal(path, target, 10 /* retries */));
+ }
+ }
+
+ @VisibleForTesting
+ public AbstractBackupPath uploadAndDeleteInternal(
+ final AbstractBackupPath path, Instant target, int retry)
+ throws RejectedExecutionException, BackupRestoreException {
+ Path localPath = Paths.get(path.getBackupFile().getAbsolutePath());
+ File localFile = localPath.toFile();
+ Preconditions.checkArgument(
+ localFile.exists(), String.format("Can't upload nonexistent %s", localPath));
+ Preconditions.checkArgument(
+ !localFile.isDirectory(),
+ String.format("Can only upload files %s is a directory", localPath));
+ Path remotePath = Paths.get(path.getRemotePath());
+
+ if (tasksQueued.add(localPath)) {
+ logger.info("Uploading file: {} to location: {}", localPath, remotePath);
+ try {
+ long uploadedFileSize;
+
+ // Upload file if it not present at remote location.
+ if (path.getType() != BackupFileType.SST_V2 || !checkObjectExists(remotePath)) {
+ notifyEventStart(new BackupEvent(path));
+ uploadedFileSize =
+ new BoundedExponentialRetryCallable<Long>(
+ 500 /* minSleep */, 10000 /* maxSleep */, retry) {
+ @Override
+ public Long retriableCall() throws Exception {
+ return uploadFileImpl(path, target);
+ }
+ }.call();
+
+ // Add to cache after successful upload.
+ // We only add SST_V2 as other file types are usually not checked, so no point
+ // evicting our SST_V2 results.
+ if (path.getType() == BackupFileType.SST_V2) addObjectCache(remotePath);
+
+ backupMetrics.recordUploadRate(uploadedFileSize);
+ backupMetrics.incrementValidUploads();
+ path.setCompressedFileSize(uploadedFileSize);
+ notifyEventSuccess(new BackupEvent(path));
+ } else {
+ // file is already uploaded to remote file system.
+ logger.info("File: {} already present on remoteFileSystem.", remotePath);
+ }
+
+ logger.info(
+ "Successfully uploaded file: {} to location: {}", localPath, remotePath);
+
+ if (!FileUtils.deleteQuietly(localFile))
+ logger.warn(
+ String.format(
+ "Failed to delete local file %s.",
+ localFile.getAbsolutePath()));
+
+ } catch (Exception e) {
+ backupMetrics.incrementInvalidUploads();
+ notifyEventFailure(new BackupEvent(path));
+ logger.error(
+ "Error while uploading file: {} to location: {}. Exception: Msg: [{}], Trace: {}",
+ localPath,
+ remotePath,
+ e.getMessage(),
+ e.getStackTrace());
+ throw new BackupRestoreException(e.getMessage());
+ } finally {
+ // Remove the task from the list so if we try to upload file ever again, we can.
+ tasksQueued.remove(localPath);
+ }
+ } else logger.info("Already in queue, no-op. File: {}", localPath);
+ return path;
+ }
+
+ private void addObjectCache(Path remotePath) {
+ objectCache.put(remotePath, Boolean.TRUE);
+ }
+
+ @Override
+ public boolean checkObjectExists(Path remotePath) {
+ // Check in cache, if remote file exists.
+ Boolean cacheResult = objectCache.getIfPresent(remotePath);
+
+ // Cache hit. Return the value.
+ if (cacheResult != null) return cacheResult;
+
+ // Cache miss - Check remote file system if object exist.
+ boolean remoteFileExist = doesRemoteFileExist(remotePath);
+
+ if (remoteFileExist) addObjectCache(remotePath);
+
+ return remoteFileExist;
+ }
+
+ @Override
+ public void deleteRemoteFiles(List<Path> remotePaths) throws BackupRestoreException {
+ if (remotePaths == null) return;
+
+ // Note that we are trying to implement a write-thru cache here, so it is a good idea to
+ // invalidate the cache first. This is important so that if there is any issue (because a file
+ // was deleted), it is caught by our snapshot job and we can re-upload the file. This will also
+ // help in ensuring that our validation job fails if there are any errors caused by the TTL
+ // of a file.
+ objectCache.invalidateAll(remotePaths);
+ deleteFiles(remotePaths);
+ }
+
+ protected abstract void deleteFiles(List<Path> remotePaths) throws BackupRestoreException;
+
+ protected abstract boolean doesRemoteFileExist(Path remotePath);
+
+ protected abstract long uploadFileImpl(final AbstractBackupPath path, Instant target)
+ throws BackupRestoreException;
+
+ @Override
+ public String getShard() {
+ return getPrefix().getName(0).toString();
+ }
+
+ @Override
+ public Path getPrefix() {
+ Path prefix = Paths.get(configuration.getBackupPrefix());
+
+ if (StringUtils.isNotBlank(configuration.getRestorePrefix())) {
+ prefix = Paths.get(configuration.getRestorePrefix());
+ }
+
+ return prefix;
+ }
+
+ @Override
+ public Iterator<AbstractBackupPath> listPrefixes(Date date) {
+ String prefix = pathProvider.get().clusterPrefix(getPrefix().toString());
+ Iterator<String> fileIterator = listFileSystem(prefix, File.pathSeparator, null);
+
+ //noinspection unchecked
+ return new TransformIterator(
+ fileIterator,
+ remotePath -> {
+ AbstractBackupPath abstractBackupPath = pathProvider.get();
+ abstractBackupPath.parsePartialPrefix(remotePath.toString());
+ return abstractBackupPath;
+ });
+ }
+
+ @Override
+ public Iterator<AbstractBackupPath> list(String path, Date start, Date till) {
+ String prefix = pathProvider.get().remotePrefix(start, till, path);
+ Iterator<String> fileIterator = listFileSystem(prefix, null, null);
+
+ @SuppressWarnings("unchecked")
+ TransformIterator transformIterator =
+ new TransformIterator(
+ fileIterator,
+ remotePath -> {
+ AbstractBackupPath abstractBackupPath = pathProvider.get();
+ abstractBackupPath.parseRemote(remotePath.toString());
+ return abstractBackupPath;
+ });
+
+ return new FilterIterator<>(
+ transformIterator,
+ abstractBackupPath ->
+ (abstractBackupPath.getTime().after(start)
+ && abstractBackupPath.getTime().before(till))
+ || abstractBackupPath.getTime().equals(start));
+ }
+
+ @Override
+ public final void addObserver(EventObserver<BackupEvent> observer) {
+ if (observer == null) throw new NullPointerException("observer must not be null.");
+
+ observers.addIfAbsent(observer);
+ }
+
+ @Override
+ public void removeObserver(EventObserver<BackupEvent> observer) {
+ if (observer == null) throw new NullPointerException("observer must not be null.");
+
+ observers.remove(observer);
+ }
+
+ @Override
+ public void notifyEventStart(BackupEvent event) {
+ observers.forEach(eventObserver -> eventObserver.updateEventStart(event));
+ }
+
+ @Override
+ public void notifyEventSuccess(BackupEvent event) {
+ observers.forEach(eventObserver -> eventObserver.updateEventSuccess(event));
+ }
+
+ @Override
+ public void notifyEventFailure(BackupEvent event) {
+ observers.forEach(eventObserver -> eventObserver.updateEventFailure(event));
+ }
+
+ @Override
+ public void notifyEventStop(BackupEvent event) {
+ observers.forEach(eventObserver -> eventObserver.updateEventStop(event));
+ }
+
+ @Override
+ public int getUploadTasksQueued() {
+ return tasksQueued.size();
+ }
+
+ @Override
+ public int getDownloadTasksQueued() {
+ return fileDownloadExecutor.getQueue().size();
+ }
+
+ @Override
+ public void clearCache() {
+ objectCache.invalidateAll();
+ }
+}
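
The object cache above follows a check-then-populate (write-thru) pattern; a stripped-down sketch of the same idea, where remoteCheck is a placeholder standing in for doesRemoteFileExist and is not a Priam API:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.nio.file.Path;

    class ObjectCacheSketch {
        private final Cache<Path, Boolean> cache = CacheBuilder.newBuilder().maximumSize(100).build();

        // Consult the cache first; only fall through to the (expensive) remote check on a miss,
        // and cache positive answers so repeated existence checks stay cheap.
        boolean exists(Path remotePath) {
            Boolean hit = cache.getIfPresent(remotePath);
            if (hit != null) return hit;
            boolean found = remoteCheck(remotePath);
            if (found) cache.put(remotePath, Boolean.TRUE);
            return found;
        }

        private boolean remoteCheck(Path p) {
            return false; // placeholder; the real implementation queries the object store
        }
    }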
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java b/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java
new file mode 100644
index 000000000..4b301e321
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java
@@ -0,0 +1,52 @@
+package com.netflix.priam.backup;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.RateLimiter;
+import com.netflix.priam.config.IConfiguration;
+import java.time.Clock;
+import java.time.Duration;
+import java.time.Instant;
+import javax.inject.Inject;
+
+public class BackupDynamicRateLimiter implements DynamicRateLimiter {
+
+ private final Clock clock;
+ private final IConfiguration config;
+ private final DirectorySize dirSize;
+ private final RateLimiter rateLimiter;
+
+ @Inject
+ BackupDynamicRateLimiter(IConfiguration config, Clock clock, DirectorySize dirSize) {
+ this.clock = clock;
+ this.config = config;
+ this.dirSize = dirSize;
+ this.rateLimiter = RateLimiter.create(Double.MAX_VALUE);
+ }
+
+ @Override
+ public void acquire(AbstractBackupPath path, Instant target, int permits) {
+ if (target.equals(Instant.EPOCH)
+ || !path.getBackupFile()
+ .getAbsolutePath()
+ .contains(AbstractBackup.SNAPSHOT_FOLDER)) {
+ return;
+ }
+ long secondsRemaining = Duration.between(clock.instant(), target).getSeconds();
+ if (secondsRemaining < 1) {
+ // skip file system checks when unnecessary
+ return;
+ }
+ int backupThreads = config.getBackupThreads();
+ Preconditions.checkState(backupThreads > 0);
+ long bytesPerThread = this.dirSize.getBytes(config.getDataFileLocation()) / backupThreads;
+ if (bytesPerThread < 1) {
+ return;
+ }
+ double newRate = (double) bytesPerThread / secondsRemaining;
+ double oldRate = rateLimiter.getRate();
+ if ((Math.abs(newRate - oldRate) / oldRate) > config.getRateLimitChangeThreshold()) {
+ rateLimiter.setRate(newRate);
+ }
+ rateLimiter.acquire(permits);
+ }
+}
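
A worked example of the throttle arithmetic in acquire(); all numbers are illustrative and the demo class is hypothetical:

    class RateMathDemo {
        public static void main(String[] args) {
            // 600 GiB of data, 2 backup threads, 3600 seconds until the snapshot deadline.
            long bytes = 600L * 1024 * 1024 * 1024;          // 644,245,094,400 bytes
            int backupThreads = 2;
            long secondsRemaining = 3600;
            long bytesPerThread = bytes / backupThreads;     // 322,122,547,200 bytes per thread
            double newRate = (double) bytesPerThread / secondsRemaining;
            System.out.printf("target rate: %.0f bytes/sec (~89.5 MB/s) per thread%n", newRate);
            // BackupDynamicRateLimiter only adopts the new rate when it differs from the old one
            // by more than config.getRateLimitChangeThreshold().
        }
    }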
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java
deleted file mode 100644
index 075677523..000000000
--- a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.backup;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Date;
-import java.util.Iterator;
-
-public abstract class BackupFileSystemAdapter implements IBackupFileSystem {
-
- public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException {
- }
-
- public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
- }
-
- public Iterator<AbstractBackupPath> list(String path, Date start, Date till) {
- return null;
- }
-
- public Iterator<AbstractBackupPath> listPrefixes(Date date) {
- return null;
- }
-
- public void cleanup() {
- }
-
- public int getActivecount() {
- return 0;
- }
-
- public void shutdown() {
- }
-}
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java
index 0db3d72df..c8b319258 100755
--- a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
@@ -24,7 +22,9 @@ public class BackupFileSystemContext implements IFileSystemContext {
private IBackupFileSystem fs = null, encryptedFs = null;
@Inject
- public BackupFileSystemContext(@Named("backup") IBackupFileSystem fs, @Named("encryptedbackup") IBackupFileSystem encryptedFs) {
+ public BackupFileSystemContext(
+ @Named("backup") IBackupFileSystem fs,
+ @Named("encryptedbackup") IBackupFileSystem encryptedFs) {
this.fs = fs;
this.encryptedFs = encryptedFs;
@@ -41,4 +41,4 @@ public IBackupFileSystem getFileStrategy(IConfiguration config) {
return this.encryptedFs;
}
}
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java b/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java
new file mode 100644
index 000000000..14a4cf6b1
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java
@@ -0,0 +1,18 @@
+package com.netflix.priam.backup;
+
+import java.util.Arrays;
+import java.util.Optional;
+
+public enum BackupFolder {
+ SNAPSHOTS("snapshots"),
+ BACKUPS("backups");
+ private String name;
+
+ BackupFolder(String name) {
+ this.name = name;
+ }
+
+ public static Optional fromName(String name) {
+ return Arrays.stream(values()).filter(b -> b.name.equals(name)).findFirst();
+ }
+}
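
Lookup is by the lower-case directory name rather than the enum constant name, which matters when parseLocal inspects path segments; a tiny illustration (demo class name is made up, assumed to sit alongside BackupFolder in com.netflix.priam.backup):

    import java.util.Optional;

    class BackupFolderDemo {
        public static void main(String[] args) {
            Optional<BackupFolder> a = BackupFolder.fromName("backups");  // Optional[BACKUPS]
            Optional<BackupFolder> b = BackupFolder.fromName("BACKUPS");  // Optional.empty
            System.out.println(a + " " + b);
        }
    }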
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java b/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java
new file mode 100644
index 000000000..38a1e98b1
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java
@@ -0,0 +1,29 @@
+package com.netflix.priam.backup;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.inject.ImplementedBy;
+import java.io.File;
+import java.io.IOException;
+import java.time.Instant;
+
+@ImplementedBy(BackupHelperImpl.class)
+public interface BackupHelper {
+
+ default ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
+ final File parent, final AbstractBackupPath.BackupFileType type, boolean async)
+ throws Exception {
+ return uploadAndDeleteAllFiles(parent, type, Instant.EPOCH, async);
+ }
+
+ ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
+ final File parent,
+ final AbstractBackupPath.BackupFileType type,
+ Instant target,
+ boolean async)
+ throws Exception;
+
+ ImmutableSet<AbstractBackupPath> getBackupPaths(
+ File dir, AbstractBackupPath.BackupFileType type) throws IOException;
+}
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java b/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java
new file mode 100644
index 000000000..c51de5e0a
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java
@@ -0,0 +1,112 @@
+package com.netflix.priam.backup;
+
+import static java.util.stream.Collectors.toSet;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.netflix.priam.compress.CompressionType;
+import com.netflix.priam.config.BackupsToCompress;
+import com.netflix.priam.config.IConfiguration;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.Set;
+import java.util.stream.Stream;
+
+public class BackupHelperImpl implements BackupHelper {
+
+ private static final String COMPRESSION_SUFFIX = "-CompressionInfo.db";
+ private static final String DATA_SUFFIX = "-Data.db";
+ private final Provider<AbstractBackupPath> pathFactory;
+ private final IBackupFileSystem fs;
+ private final IConfiguration config;
+
+ @Inject
+ public BackupHelperImpl(
+ IConfiguration config,
+ IFileSystemContext backupFileSystemCtx,
+ Provider<AbstractBackupPath> pathFactory) {
+ this.config = config;
+ this.pathFactory = pathFactory;
+ this.fs = backupFileSystemCtx.getFileStrategy(config);
+ }
+
+ /**
+ * Upload files in the specified dir. Does not delete the file in case of error. The files are
+ * uploaded serially or async based on flag provided.
+ *
+ * @param parent Parent dir
+ * @param type Type of file (META, SST, SNAP etc)
+ * @param async Upload the file(s) in async fashion if enabled.
+ * @return List of files that are successfully uploaded as part of backup
+ * @throws Exception when there is failure in uploading files.
+ */
+ @Override
+ public ImmutableList<ListenableFuture<AbstractBackupPath>> uploadAndDeleteAllFiles(
+ final File parent,
+ final AbstractBackupPath.BackupFileType type,
+ Instant target,
+ boolean async)
+ throws Exception {
+ final ImmutableList.Builder<ListenableFuture<AbstractBackupPath>> futures =
+ ImmutableList.builder();
+ for (AbstractBackupPath bp : getBackupPaths(parent, type)) {
+ futures.add(fs.uploadAndDelete(bp, target, async));
+ }
+ return futures.build();
+ }
+
+ @Override
+ public ImmutableSet<AbstractBackupPath> getBackupPaths(
+ File dir, AbstractBackupPath.BackupFileType type) throws IOException {
+ Set<File> files;
+ try (Stream<Path> pathStream = Files.list(dir.toPath())) {
+ files = pathStream.map(Path::toFile).filter(File::isFile).collect(toSet());
+ }
+ Set<String> compressedFilePrefixes =
+ files.stream()
+ .map(File::getName)
+ .filter(name -> name.endsWith(COMPRESSION_SUFFIX))
+ .map(name -> name.substring(0, name.lastIndexOf('-')))
+ .collect(toSet());
+ final ImmutableSet.Builder<AbstractBackupPath> bps = ImmutableSet.builder();
+ ImmutableSet.Builder<AbstractBackupPath> dataFiles = ImmutableSet.builder();
+ for (File file : files) {
+ final AbstractBackupPath bp = pathFactory.get();
+ bp.parseLocal(file, type);
+ bp.setCompression(getCorrectCompressionAlgorithm(bp, compressedFilePrefixes));
+ (file.getAbsolutePath().endsWith(DATA_SUFFIX) ? dataFiles : bps).add(bp);
+ }
+ bps.addAll(dataFiles.build());
+ return bps.build();
+ }
+
+ private CompressionType getCorrectCompressionAlgorithm(
+ AbstractBackupPath path, Set<String> compressedFiles) {
+ if (!AbstractBackupPath.BackupFileType.isV2(path.getType())
+ || path.getLastModified().toEpochMilli()
+ < config.getCompressionTransitionEpochMillis()) {
+ return CompressionType.SNAPPY;
+ }
+ String file = path.getFileName();
+ BackupsToCompress which = config.getBackupsToCompress();
+ switch (which) {
+ case NONE:
+ return CompressionType.NONE;
+ case ALL:
+ return CompressionType.SNAPPY;
+ case IF_REQUIRED:
+ int splitIndex = file.lastIndexOf('-');
+ return splitIndex >= 0 && compressedFiles.contains(file.substring(0, splitIndex))
+ ? CompressionType.NONE
+ : CompressionType.SNAPPY;
+ default:
+ throw new IllegalArgumentException("NONE, ALL, UNCOMPRESSED only. Saw: " + which);
+ }
+ }
+}
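
The IF_REQUIRED branch of getCorrectCompressionAlgorithm keys off whether Cassandra already wrote a *-CompressionInfo.db sibling for the same SSTable prefix; a minimal sketch of the same rule with hypothetical file names:

    import com.google.common.collect.ImmutableSet;
    import com.netflix.priam.compress.CompressionType;
    import java.util.Set;

    class CompressionChoiceDemo {
        // A file ships uncompressed (NONE) when its prefix already has a CompressionInfo
        // sibling, i.e. Cassandra compressed it on disk; otherwise Priam snappy-compresses it.
        static CompressionType choose(String fileName, Set<String> compressedPrefixes) {
            int splitIndex = fileName.lastIndexOf('-');
            return splitIndex >= 0 && compressedPrefixes.contains(fileName.substring(0, splitIndex))
                    ? CompressionType.NONE
                    : CompressionType.SNAPPY;
        }

        public static void main(String[] args) {
            Set<String> prefixes = ImmutableSet.of("na-1-big"); // derived from na-1-big-CompressionInfo.db
            System.out.println(choose("na-1-big-Data.db", prefixes)); // NONE
            System.out.println(choose("na-2-big-Data.db", prefixes)); // SNAPPY
        }
    }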
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java b/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java
index e6101c796..295078721 100644
--- a/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java
@@ -1,52 +1,51 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
-
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.GsonJsonSerializer;
+import java.io.Serializable;
+import java.util.Date;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.Serializable;
-import java.util.Date;
-
-/**
- * POJO to encapsulate the metadata for a snapshot
- * Created by aagrawal on 1/31/17.
- */
-
-final public class BackupMetadata implements Serializable {
+/** POJO to encapsulate the metadata for a snapshot Created by aagrawal on 1/31/17. */
+public final class BackupMetadata implements Serializable {
private static final Logger logger = LoggerFactory.getLogger(BackupMetadata.class);
private String snapshotDate;
private String token;
private Date start, completed;
private Status status;
+ private boolean cassandraSnapshotSuccess;
+ private Date lastValidated;
+ private BackupVersion backupVersion;
private String snapshotLocation;
- public BackupMetadata(String token, Date start) throws Exception {
+ public BackupMetadata(BackupVersion backupVersion, String token, Date start) throws Exception {
if (start == null || token == null || StringUtils.isEmpty(token))
- throw new Exception(String.format("Invalid Input: Token: {} or start date:{} is null or empty.", token, start));
-
+ throw new Exception(
+ String.format(
+ "Invalid Input: Token: %s or start date: %s is null or empty.",
+ token, start));
+ this.backupVersion = backupVersion;
this.snapshotDate = DateUtil.formatyyyyMMdd(start);
this.token = token;
this.start = start;
this.status = Status.STARTED;
+ this.cassandraSnapshotSuccess = false;
}
@Override
@@ -56,9 +55,10 @@ public boolean equals(Object o) {
BackupMetadata that = (BackupMetadata) o;
- if (!this.snapshotDate.equals(that.snapshotDate)) return false;
- if (!this.token.equals(that.token)) return false;
- return this.start.equals(that.start);
+ return this.snapshotDate.equals(that.snapshotDate)
+ && this.token.equals(that.token)
+ && this.start.equals(that.start)
+ && this.backupVersion.equals(that.backupVersion);
}
@Override
@@ -66,6 +66,7 @@ public int hashCode() {
int result = this.snapshotDate.hashCode();
result = 31 * result + this.token.hashCode();
result = 31 * result + this.start.hashCode();
+ result = 31 * result + this.backupVersion.hashCode();
return result;
}
@@ -101,7 +102,6 @@ public Date getStart() {
*
* @return completion date of snapshot.
*/
-
public Date getCompleted() {
return this.completed;
}
@@ -151,9 +151,56 @@ public void setSnapshotLocation(String snapshotLocation) {
this.snapshotLocation = snapshotLocation;
}
+ /**
+ * Find if cassandra snapshot was successful or not. This is a JMX operation and it is possible
+ * that this operation failed.
+ *
+ * @return cassandra snapshot status.
+ */
+ public boolean isCassandraSnapshotSuccess() {
+ return cassandraSnapshotSuccess;
+ }
+
+ /**
+ * Set the cassandra snapshot status to be either finished successfully or fail.
+ *
+ * @param cassandraSnapshotSuccess is set to success if JMX operation for snapshot is
+ * successful.
+ */
+ public void setCassandraSnapshotSuccess(boolean cassandraSnapshotSuccess) {
+ this.cassandraSnapshotSuccess = cassandraSnapshotSuccess;
+ }
+
+ /**
+ * Get the backup version for the snapshot.
+ *
+ * @return backup version of the snapshot.
+ */
+ public BackupVersion getBackupVersion() {
+ return backupVersion;
+ }
+
+ /**
+ * Return the last validation timestamp of this backup metadata. Validation of backup implies
+ * finding if all the files are successfully stored in remote file system.
+ *
+ * @return date of last backup validation.
+ */
+ public Date getLastValidated() {
+ return lastValidated;
+ }
+
+ /**
+ * Set the last validation date of backup metadata.
+ *
+ * @param lastValidated date value of backup validation.
+ */
+ public void setLastValidated(Date lastValidated) {
+ this.lastValidated = lastValidated;
+ }
+
@Override
- public String toString()
- {
+ public String toString() {
return GsonJsonSerializer.getGson().toJson(this);
}
}
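
An illustrative construction of the extended metadata record; the SNAPSHOT_BACKUP constant and the token value are assumptions for the example (demo class assumed to live in com.netflix.priam.backup):

    import java.util.Date;

    class BackupMetadataDemo {
        public static void main(String[] args) throws Exception {
            // SNAPSHOT_BACKUP is assumed to be one of the BackupVersion constants introduced
            // alongside this change; "1808575600" is just an example token.
            BackupMetadata meta =
                    new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "1808575600", new Date());
            meta.setCassandraSnapshotSuccess(true);
            meta.setLastValidated(new Date());
            System.out.println(meta); // serialized to JSON via GsonJsonSerializer
        }
    }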
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java
index 8f5321154..356ad2d7a 100644
--- a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java
@@ -27,5 +27,4 @@ public BackupRestoreException(String message) {
public BackupRestoreException(String message, Exception e) {
super(message, e);
}
-
}
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java
index 37fcc9179..2293a2694 100644
--- a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java
@@ -19,179 +19,169 @@
import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.netflix.priam.backupv2.IMetaProxy;
+import com.netflix.priam.backupv2.MetaV2Proxy;
+import com.netflix.priam.utils.DateUtil;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.*;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Created by aagrawal on 8/14/17.
- */
+/** Created by aagrawal on 8/14/17. */
public class BackupRestoreUtil {
private static final Logger logger = LoggerFactory.getLogger(BackupRestoreUtil.class);
- private static final String JOBNAME = "BackupRestoreUtil";
-
- private final Map<String, List<String>> columnFamilyFilter = new HashMap<>(); //key: keyspace, value: a list of CFs within the keyspace
- private final Map<String, Object> keyspaceFilter = new HashMap<>(); //key: keyspace, value: null
+ private static final Pattern columnFamilyFilterPattern = Pattern.compile(".\\..");
+ private Map<String, List<String>> includeFilter;
+ private Map<String, List<String>> excludeFilter;
- private final Pattern columnFamilyFilterPattern = Pattern.compile(".\\..");
- private String configKeyspaceFilter;
- private String configColumnfamilyFilter;
-
- public static final List<String> FILTER_KEYSPACE = Arrays.asList("OpsCenter");
- private static final Map<String, List<String>> FILTER_COLUMN_FAMILY = ImmutableMap.of("system", Arrays.asList("local", "peers", "compactions_in_progress", "LocationInfo"));
+ public static final List<String> FILTER_KEYSPACE = Collections.singletonList("OpsCenter");
+ private static final Map<String, List<String>> FILTER_COLUMN_FAMILY =
+ ImmutableMap.of(
+ "system",
+ Arrays.asList(
+ "local", "peers", "hints", "compactions_in_progress", "LocationInfo"));
@Inject
- public BackupRestoreUtil(String configKeyspaceFilter, String configColumnfamilyFilter) {
- setFilters(configKeyspaceFilter, configColumnfamilyFilter);
+ public BackupRestoreUtil(String configIncludeFilter, String configExcludeFilter) {
+ setFilters(configIncludeFilter, configExcludeFilter);
}
- private BackupRestoreUtil setFilters(String configKeyspaceFilter, String configColumnfamilyFilter) {
- this.configColumnfamilyFilter = configColumnfamilyFilter;
- this.configKeyspaceFilter = configKeyspaceFilter;
- populateFilters();
+ public BackupRestoreUtil setFilters(String configIncludeFilter, String configExcludeFilter) {
+ includeFilter = getFilter(configIncludeFilter);
+ excludeFilter = getFilter(configExcludeFilter);
+ logger.info("Exclude filter set: {}", configExcludeFilter);
+ logger.info("Include filter set: {}", configIncludeFilter);
return this;
}
- /**
- * Search for "1:* alphanumeric chars including special chars""literal period"" 1:* alphanumeric chars including special chars"
- *
- * @param cfFilter input string
- * @return true if input string matches search pattern; otherwise, false
- */
- private final boolean isValidCFFilterFormat(String cfFilter) {
- return columnFamilyFilterPattern.matcher(cfFilter).find();
- }
+ public static Optional<AbstractBackupPath> getLatestValidMetaPath(
+ IMetaProxy metaProxy, DateUtil.DateRange dateRange) {
+ // Get a list of manifest files.
+ List<AbstractBackupPath> metas = metaProxy.findMetaFiles(dateRange);
- /**
- * Populate the filters for backup/restore as configured for internal use.
- */
- private final void populateFilters() {
- //Clear the filters as we will (re)populate the filters.
- keyspaceFilter.clear();
- columnFamilyFilter.clear();
-
- if (configKeyspaceFilter == null || configKeyspaceFilter.isEmpty()) {
- logger.info("No keyspace filter set for {}.", JOBNAME);
- } else {
- String[] keyspaces = configKeyspaceFilter.split(",");
- for (int i = 0; i < keyspaces.length; i++) {
- logger.info("Adding {} keyspace filter: {}", JOBNAME, keyspaces[i]);
- this.keyspaceFilter.put(keyspaces[i], null);
+ // Find a valid manifest file.
+ for (AbstractBackupPath meta : metas) {
+ BackupVerificationResult result = metaProxy.isMetaFileValid(meta);
+ if (result.valid) {
+ return Optional.of(meta);
}
-
}
- if (configColumnfamilyFilter == null || configColumnfamilyFilter.isEmpty()) {
-
- logger.info("No column family filter set for {}.", JOBNAME);
-
- } else {
+ return Optional.empty();
+ }
- String[] filters = configColumnfamilyFilter.split(",");
- for (int i = 0; i < filters.length; i++) { //process each filter
- if (isValidCFFilterFormat(filters[i])) {
+ public static List<AbstractBackupPath> getAllFiles(
+ AbstractBackupPath latestValidMetaFile,
+ DateUtil.DateRange dateRange,
+ IMetaProxy metaProxy,
+ Provider<AbstractBackupPath> pathProvider)
+ throws Exception {
+ // Download the meta.json file.
+ Path metaFile = metaProxy.downloadMetaFile(latestValidMetaFile);
+ // Parse meta.json file to find the files required to download from this snapshot.
+ List<AbstractBackupPath> allFiles =
+ metaProxy
+ .getSSTFilesFromMeta(metaFile)
+ .stream()
+ .map(
+ value -> {
+ AbstractBackupPath path = pathProvider.get();
+ path.parseRemote(value);
+ return path;
+ })
+ .collect(Collectors.toList());
+
+ FileUtils.deleteQuietly(metaFile.toFile());
+
+ // Download incremental SSTables after the snapshot meta file.
+ Instant snapshotTime;
+ if (metaProxy instanceof MetaV2Proxy) snapshotTime = latestValidMetaFile.getLastModified();
+ else snapshotTime = latestValidMetaFile.getTime().toInstant();
+
+ DateUtil.DateRange incrementalDateRange =
+ new DateUtil.DateRange(snapshotTime, dateRange.getEndTime());
+ Iterator<AbstractBackupPath> incremental = metaProxy.getIncrementals(incrementalDateRange);
+ while (incremental.hasNext()) allFiles.add(incremental.next());
+
+ return allFiles;
+ }
- String[] filter = filters[i].split("\\.");
- String ksName = filter[0];
- String cfName = filter[1];
- logger.info("Adding {} CF filter: {}.{}", JOBNAME, ksName, cfName);
+ public static final Map<String, List<String>> getFilter(String inputFilter)
+ throws IllegalArgumentException {
+ if (StringUtils.isEmpty(inputFilter)) return null;
- if (this.columnFamilyFilter.containsKey(ksName)) {
- //add cf to existing filter
- List columnfamilies = this.columnFamilyFilter.get(ksName);
- columnfamilies.add(cfName);
- this.columnFamilyFilter.put(ksName, columnfamilies);
+ final Map<String, List<String>> columnFamilyFilter =
+ new HashMap<>(); // key: keyspace, value: a list of CFs within the keyspace
- } else {
+ String[] filters = inputFilter.split(",");
+ for (String cfFilter :
+ filters) { // process filter of form keyspace.* or keyspace.columnfamily
+ if (columnFamilyFilterPattern.matcher(cfFilter).find()) {
- List<String> cfs = new ArrayList<>();
- cfs.add(cfName);
- this.columnFamilyFilter.put(ksName, cfs);
+ String[] filter = cfFilter.split("\\.");
+ String keyspaceName = filter[0];
+ String columnFamilyName = filter[1];
- }
+ if (columnFamilyName.contains("-"))
+ columnFamilyName = columnFamilyName.substring(0, columnFamilyName.indexOf("-"));
- } else {
- throw new IllegalArgumentException("Column family filter format is not valid. Format needs to be \"keyspace.columnfamily\". Invalid input: " + filters[i]);
- }
- } //end processing each filter
+ List<String> existingCfs =
+ columnFamilyFilter.getOrDefault(keyspaceName, new ArrayList<>());
+ if (!columnFamilyName.equalsIgnoreCase("*")) existingCfs.add(columnFamilyName);
+ columnFamilyFilter.put(keyspaceName, existingCfs);
+ } else {
+ throw new IllegalArgumentException(
+ "Column family filter format is not valid. Format needs to be \"keyspace.columnfamily\". Invalid input: "
+ + cfFilter);
+ }
}
+ return columnFamilyFilter;
}
/**
* Returns if provided keyspace and/or columnfamily is filtered for backup or restore.
+ *
* @param keyspace name of the keyspace in consideration
* @param columnFamilyDir name of the columnfamily directory in consideration
* @return true if directory should be filter from processing; otherwise, false.
*/
- public final boolean isFiltered(String keyspace, String columnFamilyDir){
- if (StringUtils.isEmpty(keyspace) || StringUtils.isEmpty(columnFamilyDir))
- return false;
+ public final boolean isFiltered(String keyspace, String columnFamilyDir) {
+ if (StringUtils.isEmpty(keyspace) || StringUtils.isEmpty(columnFamilyDir)) return false;
String columnFamilyName = columnFamilyDir.split("-")[0];
- //column family is in list of global CF filter
- if (FILTER_COLUMN_FAMILY.containsKey(keyspace) && FILTER_COLUMN_FAMILY.get(keyspace).contains(columnFamilyName))
- return true;
-
- if (isFiltered(BackupRestoreUtil.DIRECTORYTYPE.KEYSPACE, keyspace) || //keyspace is filtered
- isFiltered(BackupRestoreUtil.DIRECTORYTYPE.CF, keyspace, columnFamilyDir) //columnfamily is filtered
- ) {
- logger.debug("Skipping: keyspace: {}, CF: {} is part of filter list.", keyspace, columnFamilyName);
- return true;
- }
-
- return false;
- }
-
- /**
- * @param directoryType keyspace or columnfamily directory type.
- * @return true if directory should be filter from processing; otherwise, false.
- */
- private final boolean isFiltered(DIRECTORYTYPE directoryType, String... args) {
-
- if (directoryType.equals(DIRECTORYTYPE.KEYSPACE)) { //start with filtering the parent (keyspace)
- //Apply each keyspace filter to input string
- String keyspaceName = args[0];
-
- java.util.Set<String> ksFilters = keyspaceFilter.keySet();
- Iterator<String> it = ksFilters.iterator();
- while (it.hasNext()) {
- String ksFilter = it.next();
- Pattern pattern = Pattern.compile(ksFilter);
- Matcher matcher = pattern.matcher(keyspaceName);
- if (matcher.find()) {
- logger.debug("Keyspace: {} matched filter: {}", keyspaceName, ksFilter);
- return true;
- }
+ // column family is in list of global CF filter
+ if (FILTER_COLUMN_FAMILY.containsKey(keyspace)
+ && FILTER_COLUMN_FAMILY.get(keyspace).contains(columnFamilyName)) return true;
+
+ if (excludeFilter != null)
+ if (excludeFilter.containsKey(keyspace)
+ && (excludeFilter.get(keyspace).isEmpty()
+ || excludeFilter.get(keyspace).contains(columnFamilyName))) {
+ logger.debug(
+ "Skipping: keyspace: {}, CF: {} is part of exclude list.",
+ keyspace,
+ columnFamilyName);
+ return true;
}
- }
- if (directoryType.equals(DIRECTORYTYPE.CF)) { //parent (keyspace) is not filtered, now see if the child (CF) is filtered
- String keyspaceName = args[0];
- if (!columnFamilyFilter.containsKey(keyspaceName)) {
- return false;
+ if (includeFilter != null)
+ if (!(includeFilter.containsKey(keyspace)
+ && (includeFilter.get(keyspace).isEmpty()
+ || includeFilter.get(keyspace).contains(columnFamilyName)))) {
+ logger.debug(
+ "Skipping: keyspace: {}, CF: {} is not part of include list.",
+ keyspace,
+ columnFamilyName);
+ return true;
}
- String cfName = args[1];
- List<String> cfsFilter = columnFamilyFilter.get(keyspaceName);
- for (int i = 0; i < cfsFilter.size(); i++) {
- Pattern pattern = Pattern.compile(cfsFilter.get(i));
- Matcher matcher = pattern.matcher(cfName);
- if (matcher.find()) {
- logger.debug("{}.{} matched filter", keyspaceName, cfName);
- return true;
- }
- }
- }
-
- return false; //if here, current input are not part of keyspae and cf filters
- }
-
- public enum DIRECTORYTYPE {
- KEYSPACE, CF
+ return false;
}
}
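
To make the new filter format concrete, here is a small, hypothetical usage sketch. It relies only on what the hunk above shows: a public static getFilter(String) on BackupRestoreUtil (assumed package com.netflix.priam.backup) that splits comma-separated "keyspace.columnfamily" entries, treats "keyspace.*" as the whole keyspace (stored as an empty CF list), truncates column family names at the first '-', and rejects entries that do not match the expected format.

import com.netflix.priam.backup.BackupRestoreUtil;
import java.util.List;
import java.util.Map;

public class FilterFormatExample {
    public static void main(String[] args) {
        // Comma-separated "keyspace.columnfamily" entries; "ks2.*" covers the whole keyspace
        // and is stored as an empty CF list for that keyspace.
        Map<String, List<String>> filter = BackupRestoreUtil.getFilter("ks1.cf1,ks1.cf2,ks2.*");
        System.out.println(filter); // e.g. {ks1=[cf1, cf2], ks2=[]} (HashMap, so key order may vary)

        // Column family directory names such as "cf1-1a2b3c" are cut at the first '-', so this is still cf1.
        System.out.println(BackupRestoreUtil.getFilter("ks1.cf1-1a2b3c")); // {ks1=[cf1]}

        // An entry without a '.' does not match the "keyspace.columnfamily" format:
        // BackupRestoreUtil.getFilter("not_a_valid_filter"); // throws IllegalArgumentException
    }
}
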
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupService.java b/priam/src/main/java/com/netflix/priam/backup/BackupService.java
new file mode 100644
index 000000000..64c750f8d
--- /dev/null
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupService.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.priam.backup;
+
+import com.google.inject.Inject;
+import com.netflix.priam.aws.UpdateCleanupPolicy;
+import com.netflix.priam.config.IBackupRestoreConfig;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.defaultimpl.IService;
+import com.netflix.priam.scheduler.PriamScheduler;
+import com.netflix.priam.scheduler.TaskTimer;
+import com.netflix.priam.tuner.CassandraTunerService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Encapsulate the backup service 1.0 - Execute all the tasks required to run backup service.
+ *
+ * Created by aagrawal on 3/9/19.
+ */
+public class BackupService implements IService {
+ private final PriamScheduler scheduler;
+ private final IConfiguration config;
+ private final IBackupRestoreConfig backupRestoreConfig;
+ private final CassandraTunerService cassandraTunerService;
+ private static final Logger logger = LoggerFactory.getLogger(BackupService.class);
+
+ @Inject
+ public BackupService(
+ IConfiguration config,
+ IBackupRestoreConfig backupRestoreConfig,
+ PriamScheduler priamScheduler,
+ CassandraTunerService cassandraTunerService) {
+ this.config = config;
+ this.backupRestoreConfig = backupRestoreConfig;
+ this.scheduler = priamScheduler;
+ this.cassandraTunerService = cassandraTunerService;
+ }
+
+ @Override
+ public void scheduleService() throws Exception {
+ // Start the snapshot backup schedule - Always run this. (If you want to
+ // set it off, set backup hour to -1) or set backup cron to "-1"
+ TaskTimer snapshotTimer = SnapshotBackup.getTimer(config);
+ scheduleTask(scheduler, SnapshotBackup.class, snapshotTimer);
+
+ if (snapshotTimer != null) {
+ // Set cleanup
+ scheduleTask(scheduler, UpdateCleanupPolicy.class, UpdateCleanupPolicy.getTimer());
+ // Schedule commit log task
+ scheduleTask(
+ scheduler, CommitLogBackupTask.class, CommitLogBackupTask.getTimer(config));
+ }
+
+ // Start the Incremental backup schedule if enabled
+ scheduleTask(
+ scheduler,
+ IncrementalBackup.class,
+ IncrementalBackup.getTimer(config, backupRestoreConfig));
+ }
+
+ @Override
+ public void updateServicePre() throws Exception {
+ // Run the task to tune Cassandra
+ cassandraTunerService.onChangeUpdateService();
+ }
+
+ @Override
+ public void updateServicePost() throws Exception {}
+}
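
For orientation, the sketch below shows how an already-injected BackupService would typically be driven; it uses only the IService methods defined in this file, and the caller class itself is hypothetical (in Priam the equivalent wiring happens at server start-up).

import com.google.inject.Inject;
import com.netflix.priam.backup.BackupService;

public class BackupBootstrapExample {
    private final BackupService backupService;

    @Inject
    public BackupBootstrapExample(BackupService backupService) {
        this.backupService = backupService;
    }

    public void start() throws Exception {
        backupService.updateServicePre();  // tune Cassandra via CassandraTunerService first
        backupService.scheduleService();   // register snapshot, cleanup, commit-log and incremental timers
        backupService.updateServicePost(); // currently a no-op
    }
}

Note the guard in scheduleService(): the cleanup policy and commit-log tasks are only scheduled when SnapshotBackup.getTimer(config) returns a non-null timer, i.e. when snapshot backups are enabled.
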
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java b/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java
index f645901a3..36489e022 100644
--- a/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java
@@ -1,16 +1,14 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
@@ -20,12 +18,14 @@
import com.netflix.priam.health.InstanceState;
import com.netflix.priam.utils.DateUtil;
import com.netflix.priam.utils.MaxSizeHashMap;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.*;
+import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
-
/*
* A means to manage metadata for various types of backups (snapshots, incrementals)
*/
@@ -35,17 +35,19 @@ public abstract class BackupStatusMgr implements IBackupStatusMgr {
private static final Logger logger = LoggerFactory.getLogger(BackupStatusMgr.class);
/**
- * Map: Map of completed snapshots represented by its snapshot day (yyyymmdd)
- * and a list of snapshots started on that day
- * Note: A {@link LinkedList} was chosen for fastest retrieval of latest snapshot.
+ * Map: Map of completed snapshots represented by its
+ * snapshot day (yyyymmdd) and a list of snapshots started on that day Note: A {@link
+ * LinkedList} was chosen for fastest retrieval of latest snapshot.
*/
Map<String, LinkedList<BackupMetadata>> backupMetadataMap;
- int capacity;
- private InstanceState instanceState;
+
+ final int capacity;
+ private final InstanceState instanceState;
/**
* @param capacity Capacity to hold in-memory snapshot status days.
- * @param instanceState Status of the instance encapsulating health and other metadata of Priam and Cassandra.
+ * @param instanceState Status of the instance encapsulating health and other metadata of Priam
+ * and Cassandra.
*/
@Inject
public BackupStatusMgr(int capacity, InstanceState instanceState) {
@@ -73,16 +75,14 @@ public LinkedList<BackupMetadata> locate(Date snapshotDate) {
@Override
public LinkedList<BackupMetadata> locate(String snapshotDate) {
- if (StringUtils.isEmpty(snapshotDate))
- return null;
+ if (StringUtils.isEmpty(snapshotDate)) return null;
// See if in memory
- if (backupMetadataMap.containsKey(snapshotDate))
- return backupMetadataMap.get(snapshotDate);
+ if (backupMetadataMap.containsKey(snapshotDate)) return backupMetadataMap.get(snapshotDate);
LinkedList<BackupMetadata> metadataLinkedList = fetch(snapshotDate);
- //Save the result in local cache so we don't hit data store/file.
+ // Save the result in local cache so we don't hit data store/file.
backupMetadataMap.put(snapshotDate, metadataLinkedList);
return metadataLinkedList;
@@ -99,64 +99,75 @@ public void start(BackupMetadata backupMetadata) {
metadataLinkedList.addFirst(backupMetadata);
backupMetadataMap.put(backupMetadata.getSnapshotDate(), metadataLinkedList);
instanceState.setBackupStatus(backupMetadata);
- //Save the backupMetaDataMap
+ // Save the backupMetaDataMap
save(backupMetadata);
}
@Override
public void finish(BackupMetadata backupMetadata) {
- //validate that it has actually finished. If not, then set the status and current date.
+ // validate that it has actually finished. If not, then set the status and current date.
if (backupMetadata.getStatus() != Status.FINISHED)
backupMetadata.setStatus(Status.FINISHED);
if (backupMetadata.getCompleted() == null)
- backupMetadata.setCompleted(Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
+ backupMetadata.setCompleted(
+ Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
instanceState.setBackupStatus(backupMetadata);
+ update(backupMetadata);
+ }
- //Retrieve the snapshot metadata and then update the finish date/status.
+ @Override
+ public void update(BackupMetadata backupMetadata) {
+ // Retrieve the snapshot metadata and then update the finish date/status.
retrieveAndUpdate(backupMetadata);
- //Save the backupMetaDataMap
+ // Save the backupMetaDataMap
save(backupMetadata);
-
}
private void retrieveAndUpdate(final BackupMetadata backupMetadata) {
- //Retrieve the snapshot metadata and then update the date/status.
+ // Retrieve the snapshot metadata and then update the date/status.
LinkedList<BackupMetadata> metadataLinkedList = locate(backupMetadata.getSnapshotDate());
- if (metadataLinkedList == null || metadataLinkedList.isEmpty()) {
- logger.error("No previous backupMetaData found. This should not happen. Creating new to ensure app keeps running.");
+ if (metadataLinkedList == null) {
+ logger.error(
+ "No previous backupMetaData found. This should not happen. Creating new to ensure app keeps running.");
metadataLinkedList = new LinkedList<>();
- metadataLinkedList.addFirst(backupMetadata);
+ backupMetadataMap.put(backupMetadata.getSnapshotDate(), metadataLinkedList);
}
- metadataLinkedList.forEach(backupMetadata1 -> {
- if (backupMetadata1.equals(backupMetadata)) {
- backupMetadata1.setCompleted(backupMetadata.getCompleted());
- backupMetadata1.setStatus(backupMetadata.getStatus());
- }
- });
+ Optional<BackupMetadata> searchedData =
+ metadataLinkedList
+ .stream()
+ .filter(backupMetadata1 -> backupMetadata.equals(backupMetadata1))
+ .findFirst();
+ if (!searchedData.isPresent()) {
+ metadataLinkedList.addFirst(backupMetadata);
+ }
+ searchedData.ifPresent(
+ backupMetadata1 -> {
+ backupMetadata1.setCompleted(backupMetadata.getCompleted());
+ backupMetadata1.setStatus(backupMetadata.getStatus());
+ backupMetadata1.setCassandraSnapshotSuccess(
+ backupMetadata.isCassandraSnapshotSuccess());
+ backupMetadata1.setSnapshotLocation(backupMetadata.getSnapshotLocation());
+ backupMetadata1.setLastValidated(backupMetadata.getLastValidated());
+ });
}
@Override
public void failed(BackupMetadata backupMetadata) {
- //validate that it has actually failed. If not, then set the status and current date.
+ // validate that it has actually failed. If not, then set the status and current date.
if (backupMetadata.getCompleted() == null)
- backupMetadata.setCompleted(Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
+ backupMetadata.setCompleted(
+ Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime());
- //Set this later to ensure the status
- if (backupMetadata.getStatus() != Status.FAILED)
- backupMetadata.setStatus(Status.FAILED);
+ // Set this later to ensure the status
+ if (backupMetadata.getStatus() != Status.FAILED) backupMetadata.setStatus(Status.FAILED);
instanceState.setBackupStatus(backupMetadata);
-
- //Retrieve the snapshot metadata and then update the failure date/status.
- retrieveAndUpdate(backupMetadata);
-
- //Save the backupMetaDataMap
- save(backupMetadata);
+ update(backupMetadata);
}
/**
@@ -170,16 +181,59 @@ public void failed(BackupMetadata backupMetadata) {
* Implementation on how to retrieve the backup metadata(s) for a given date from store.
*
* @param snapshotDate Snapshot date to be retrieved from datastore in format of yyyyMMdd
- * @return The list of snapshots started on the snapshot day in descending order of snapshot start time.
+ * @return The list of snapshots started on the snapshot day in descending order of snapshot
+ * start time.
*/
protected abstract LinkedList<BackupMetadata> fetch(String snapshotDate);
+ public List<BackupMetadata> getLatestBackupMetadata(
+ BackupVersion backupVersion, DateUtil.DateRange dateRange) {
+ Instant startDay = dateRange.getStartTime().truncatedTo(ChronoUnit.DAYS);
+ Instant endDay = dateRange.getEndTime().truncatedTo(ChronoUnit.DAYS);
+
+ List<BackupMetadata> allBackups = new ArrayList<>();
+ Instant previousDay = endDay;
+ do {
+ // We need to find the latest backupmetadata in this date range.
+ logger.info(
+ "Will try to find snapshot for : {}",
+ DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, previousDay));
+ List<BackupMetadata> backupsForDate = locate(new Date(previousDay.toEpochMilli()));
+ if (backupsForDate != null) allBackups.addAll(backupsForDate);
+ previousDay = previousDay.minus(1, ChronoUnit.DAYS);
+ } while (!previousDay.isBefore(startDay));
+
+ // Return all the backups which are FINISHED and were "started" in the dateRange provided.
+ // Do not compare the end time of snapshot as it may take random amount of time to finish
+ // the snapshot.
+ return allBackups
+ .stream()
+ .filter(Objects::nonNull)
+ .filter(backupMetadata -> backupMetadata.getStatus() == Status.FINISHED)
+ .filter(backupMetadata -> backupMetadata.getBackupVersion().equals(backupVersion))
+ .filter(
+ backupMetadata ->
+ backupMetadata
+ .getStart()
+ .toInstant()
+ .compareTo(dateRange.getStartTime())
+ >= 0
+ && backupMetadata
+ .getStart()
+ .toInstant()
+ .compareTo(dateRange.getEndTime())
+ <= 0)
+ .sorted(Comparator.comparing(BackupMetadata::getStart).reversed())
+ .collect(Collectors.toList());
+ }
+
@Override
public String toString() {
- final StringBuffer sb = new StringBuffer("BackupStatusMgr{");
- sb.append("backupMetadataMap=").append(backupMetadataMap);
- sb.append(", capacity=").append(capacity);
- sb.append('}');
- return sb.toString();
+ return "BackupStatusMgr{"
+ + "backupMetadataMap="
+ + backupMetadataMap
+ + ", capacity="
+ + capacity
+ + '}';
}
}
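
The new getLatestBackupMetadata query is easiest to read with an example. The sketch below is hypothetical caller code; it relies only on the signatures visible in this hunk and in BackupVerification below (IBackupStatusMgr exposing getLatestBackupMetadata, the DateUtil.DateRange(Instant, Instant) constructor, and the BackupVersion.SNAPSHOT_META_SERVICE constant), and it assumes those types live in their usual Priam packages.

import com.netflix.priam.backup.BackupMetadata;
import com.netflix.priam.backup.BackupVersion;
import com.netflix.priam.backup.IBackupStatusMgr;
import com.netflix.priam.utils.DateUtil;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Optional;

public class LatestSnapshotLookupExample {
    // Returns the most recent FINISHED meta-v2 snapshot started in the last 48 hours, if any.
    public static Optional<BackupMetadata> latestV2Snapshot(IBackupStatusMgr backupStatusMgr) {
        Instant now = Instant.now();
        DateUtil.DateRange dateRange = new DateUtil.DateRange(now.minus(2, ChronoUnit.DAYS), now);
        List<BackupMetadata> backups =
                backupStatusMgr.getLatestBackupMetadata(BackupVersion.SNAPSHOT_META_SERVICE, dateRange);
        // getLatestBackupMetadata already filters to FINISHED backups of the requested version
        // and sorts them newest-first, so the head of the list is the latest snapshot.
        return backups.stream().findFirst();
    }
}
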
diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java b/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java
index 03c896f3e..3dfc1bdfb 100644
--- a/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java
+++ b/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java
@@ -1,152 +1,165 @@
/**
* Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.priam.backup;
import com.google.inject.Inject;
+import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
-import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.backupv2.IMetaProxy;
+import com.netflix.priam.scheduler.UnsupportedTypeException;
import com.netflix.priam.utils.DateUtil;
-import org.apache.commons.collections4.CollectionUtils;
-import org.json.simple.parser.JSONParser;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.nio.file.FileSystems;
+import com.netflix.priam.utils.DateUtil.DateRange;
import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
import java.util.*;
-import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- * Created by aagrawal on 2/16/17.
- * This class validates the backup by doing listing of files in the backup destination and comparing with meta.json by downloading from the location.
- * Input: BackupMetadata that needs to be verified.
- * Since one backupmetadata can have multiple start time, provide one startTime if interested in verifying one particular backup.
- * Leave startTime as null to get the latest snapshot for the provided BackupMetadata.
+ * Created by aagrawal on 2/16/17. This class validates the backup by doing listing of files in the
+ * backup destination and comparing with meta.json by downloading from the location. Input:
+ * BackupMetadata that needs to be verified.
*/
@Singleton
public class BackupVerification {
private static final Logger logger = LoggerFactory.getLogger(BackupVerification.class);
- private IBackupFileSystem bkpStatusFs;
- private IConfiguration config;
+ private final IMetaProxy metaV1Proxy;
+ private final IMetaProxy metaV2Proxy;
+ private final IBackupStatusMgr backupStatusMgr;
+ private final Provider<AbstractBackupPath> abstractBackupPathProvider;
+ private BackupVerificationResult latestResult;
@Inject
- BackupVerification(@Named("backup_status") IBackupFileSystem bkpStatusFs, IConfiguration config) {
- this.bkpStatusFs = bkpStatusFs;
- this.config = config;
+ BackupVerification(
+ @Named("v1") IMetaProxy metaV1Proxy,
+ @Named("v2") IMetaProxy metaV2Proxy,
+ IBackupStatusMgr backupStatusMgr,
+ Provider<AbstractBackupPath> abstractBackupPathProvider) {
+ this.metaV1Proxy = metaV1Proxy;
+ this.metaV2Proxy = metaV2Proxy;
+ this.backupStatusMgr = backupStatusMgr;
+ this.abstractBackupPathProvider = abstractBackupPathProvider;
}
- public BackupVerificationResult verifyBackup(List<BackupMetadata> metadata, Date startTime) {
- BackupVerificationResult result = new BackupVerificationResult();
+ public IMetaProxy getMetaProxy(BackupVersion backupVersion) {
+ switch (backupVersion) {
+ case SNAPSHOT_BACKUP:
+ return metaV1Proxy;
+ case SNAPSHOT_META_SERVICE:
+ return metaV2Proxy;
+ }
- if (metadata == null || metadata.isEmpty())
- return result;
+ return null;
+ }
- result.snapshotAvailable = true;
- // All the dates should be same.
- result.selectedDate = metadata.get(0).getSnapshotDate();
+ public Optional<BackupVerificationResult> verifyBackup(
+ BackupVersion backupVersion, boolean force, DateRange dateRange)
+ throws UnsupportedTypeException, IllegalArgumentException {
+ IMetaProxy metaProxy = getMetaProxy(backupVersion);
+ if (metaProxy == null) {
+ throw new UnsupportedTypeException(
+ "BackupVersion type: " + backupVersion + " is not supported");
+ }
- List<String> backups = metadata.stream().map(backupMetadata ->
- DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart())).collect(Collectors.toList());
- logger.info("Snapshots found for {} : [{}]", result.selectedDate, backups);
+ if (dateRange == null) {
+ throw new IllegalArgumentException("dateRange provided is null");
+ }
- //find the latest date (default) or verify if one provided
- Date latestDate = null;
+ List<BackupMetadata> metadata =
+ backupStatusMgr.getLatestBackupMetadata(backupVersion, dateRange);
+ if (metadata == null || metadata.isEmpty()) return Optional.empty();
for (BackupMetadata backupMetadata : metadata) {
- if (latestDate == null || latestDate.before(backupMetadata.getStart()))
- latestDate = backupMetadata.getStart();
-
- if (startTime != null &&
- DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart()).equals(DateUtil.formatyyyyMMddHHmm(startTime))) {
- latestDate = startTime;
- break;
+ if (backupMetadata.getLastValidated() != null && !force) {
+ // Backup is already validated. Nothing to do.
+ latestResult = new BackupVerificationResult();
+ latestResult.valid = true;
+ latestResult.manifestAvailable = true;
+ latestResult.snapshotInstant = backupMetadata.getStart().toInstant();
+ Path snapshotLocation = Paths.get(backupMetadata.getSnapshotLocation());
+ latestResult.remotePath =
+ snapshotLocation.subpath(1, snapshotLocation.getNameCount()).toString();
+ return Optional.of(latestResult);
+ }
+ BackupVerificationResult backupVerificationResult =
+ verifyBackup(metaProxy, backupMetadata);
+ if (logger.isDebugEnabled())
+ logger.debug(
+ "BackupVerification: metadata: {}, result: {}",
+ backupMetadata,
+ backupVerificationResult);
+ if (backupVerificationResult.valid) {
+ backupMetadata.setLastValidated(new Date(DateUtil.getInstant().toEpochMilli()));
+ backupStatusMgr.update(backupMetadata);
+ latestResult = backupVerificationResult;
+ return Optional.of(backupVerificationResult);
}
}
+ latestResult = null;
+ return Optional.empty();
+ }
- result.snapshotTime = DateUtil.formatyyyyMMddHHmm(latestDate);
- logger.info("Latest/Requested snapshot date found: {}, for selected/provided date: {}", result.snapshotTime, result.selectedDate);
-
- //Get Backup File Iterator
- String prefix = config.getBackupPrefix();
- logger.info("Looking for meta file in the location: {}", prefix);
-
- Date strippedMsSnapshotTime = DateUtil.getDate(result.snapshotTime);
- Iterator