diff --git a/.gitignore b/.gitignore
index b17f2a0..3253525 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@ obj/
csx/
Profiles/
*.ncrunch*
-packages/
\ No newline at end of file
+packages/
+.vs/
diff --git a/AzureDirectory.Tests/AzureDirectory.Tests.csproj b/AzureDirectory.Tests/AzureDirectory.Tests.csproj
index 771cec3..9ab8696 100644
--- a/AzureDirectory.Tests/AzureDirectory.Tests.csproj
+++ b/AzureDirectory.Tests/AzureDirectory.Tests.csproj
@@ -1,7 +1,7 @@
-    <TargetFramework>netcoreapp2.2</TargetFramework>
+    <TargetFramework>netcoreapp3.1</TargetFramework>
     <IsPackable>false</IsPackable>
diff --git a/AzureDirectory.Tests/IntegrationTests.cs b/AzureDirectory.Tests/IntegrationTests.cs
index 89e8ab9..fe72054 100644
--- a/AzureDirectory.Tests/IntegrationTests.cs
+++ b/AzureDirectory.Tests/IntegrationTests.cs
@@ -3,8 +3,8 @@
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Store.Azure;
-using Microsoft.Azure.Storage;
-using Microsoft.Azure.Storage.Blob;
+using Azure;
+using Azure.Storage.Blobs;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Text;
@@ -13,108 +13,111 @@
namespace AzureDirectory.Tests
{
- [TestClass]
- public class IntegrationTests
- {
- private readonly string connectionString;
-
- public IntegrationTests()
- {
- this.connectionString = System.Environment.GetEnvironmentVariable("DataConnectionString") ?? "UseDevelopmentStorage=true";
- }
-
-
- [TestMethod]
- public void TestReadAndWrite()
+ [TestClass]
+ public class IntegrationTests
{
- var cloudStorageAccount = CloudStorageAccount.Parse(connectionString);
-
- const string containerName = "testcatalog2";
+ private readonly string connectionString;
- var azureDirectory = new Lucene.Net.Store.Azure.AzureDirectory(cloudStorageAccount, "temp", containerName: containerName);
-
- var indexWriterConfig = new IndexWriterConfig(
- Lucene.Net.Util.LuceneVersion.LUCENE_48,
- new StandardAnalyzer(Lucene.Net.Util.LuceneVersion.LUCENE_48));
-
- int dog = 0, cat = 0, car = 0;
-
- using (var indexWriter = new IndexWriter(azureDirectory, indexWriterConfig))
- {
-
- for (var iDoc = 0; iDoc < 10000; iDoc++)
+ public IntegrationTests()
{
- var bodyText = GeneratePhrase(40);
- var doc = new Document {
- new TextField("id", DateTime.Now.ToFileTimeUtc() + "-" + iDoc, Field.Store.YES),
- new TextField("Title", GeneratePhrase(10), Field.Store.YES),
- new TextField("Body", bodyText, Field.Store.YES)
- };
- dog += bodyText.Contains(" dog ") ? 1 : 0;
- cat += bodyText.Contains(" cat ") ? 1 : 0;
- car += bodyText.Contains(" car ") ? 1 : 0;
- indexWriter.AddDocument(doc);
+ this.connectionString = Environment.GetEnvironmentVariable("DataConnectionString") ?? "UseDevelopmentStorage=true";
}
- Console.WriteLine("Total docs is {0}, {1} dog, {2} cat, {3} car", indexWriter.NumDocs, dog, cat, car);
- }
- try
- {
- var ireader = DirectoryReader.Open(azureDirectory);
- for (var i = 0; i < 100; i++)
+ [TestMethod]
+ public void TestReadAndWrite()
{
- var searcher = new IndexSearcher(ireader);
- var searchForPhrase = SearchForPhrase(searcher, "dog");
- Assert.AreEqual(dog, searchForPhrase);
- searchForPhrase = SearchForPhrase(searcher, "cat");
- Assert.AreEqual(cat, searchForPhrase);
- searchForPhrase = SearchForPhrase(searcher, "car");
- Assert.AreEqual(car, searchForPhrase);
+ //var cloudStorageAccount = CloudStorageAccount.Parse(connectionString);
+
+ const string containerName = "testcatalog2";
+
+ //var azureDirectory = new Lucene.Net.Store.Azure.AzureDirectory(cloudStorageAccount, "temp", containerName: containerName);
+ var azureDirectory = new Lucene.Net.Store.Azure.AzureDirectory(connectionString, "temp", containerName: containerName);
+
+ var indexWriterConfig = new IndexWriterConfig(
+ Lucene.Net.Util.LuceneVersion.LUCENE_48,
+ new StandardAnalyzer(Lucene.Net.Util.LuceneVersion.LUCENE_48, StandardAnalyzer.STOP_WORDS_SET));
+
+ int dog = 0, cat = 0, car = 0;
+
+ using (var indexWriter = new IndexWriter(azureDirectory, indexWriterConfig))
+ {
+
+ for (var iDoc = 0; iDoc < 10000; iDoc++)
+ {
+ var bodyText = GeneratePhrase(40);
+ var doc = new Document {
+ new TextField("id", DateTime.Now.ToFileTimeUtc() + "-" + iDoc, Field.Store.YES),
+ new TextField("Title", GeneratePhrase(10), Field.Store.YES),
+ new TextField("Body", bodyText, Field.Store.YES)
+ };
+ dog += bodyText.Contains(" dog ") ? 1 : 0;
+ cat += bodyText.Contains(" cat ") ? 1 : 0;
+ car += bodyText.Contains(" car ") ? 1 : 0;
+ indexWriter.AddDocument(doc);
+ }
+
+ Console.WriteLine("Total docs is {0}, {1} dog, {2} cat, {3} car", indexWriter.NumDocs, dog, cat, car);
+ }
+ try
+ {
+
+ var ireader = DirectoryReader.Open(azureDirectory);
+ for (var i = 0; i < 100; i++)
+ {
+ var searcher = new IndexSearcher(ireader);
+ var searchForPhrase = SearchForPhrase(searcher, "dog");
+ Assert.AreEqual(dog, searchForPhrase);
+ searchForPhrase = SearchForPhrase(searcher, "cat");
+ Assert.AreEqual(cat, searchForPhrase);
+ searchForPhrase = SearchForPhrase(searcher, "car");
+ Assert.AreEqual(car, searchForPhrase);
+ }
+ Console.WriteLine("Tests passsed");
+ }
+ catch (Exception x)
+ {
+ Console.WriteLine("Tests failed:\n{0}", x);
+ }
+ finally
+ {
+ // check the container exists, and delete it
+ //var blobClient = cloudStorageAccount.CreateBlobServiceClient();
+ var blobClient = new BlobServiceClient(connectionString);
+ //var container = blobClient.GetContainerReference(containerName);
+ var container = blobClient.GetBlobContainerClient(containerName);
+ Assert.IsTrue(container.Exists()); // check the container exists
+ container.Delete();
+ }
}
- Console.WriteLine("Tests passsed");
- }
- catch (Exception x)
- {
- Console.WriteLine("Tests failed:\n{0}", x);
- }
- finally
- {
- // check the container exists, and delete it
- var blobClient = cloudStorageAccount.CreateCloudBlobClient();
- var container = blobClient.GetContainerReference(containerName);
- Assert.IsTrue(container.Exists()); // check the container exists
- container.Delete();
- }
- }
- private static int SearchForPhrase(IndexSearcher searcher, string phrase)
- {
- var parser = new Lucene.Net.QueryParsers.Classic.QueryParser(Lucene.Net.Util.LuceneVersion.LUCENE_48, "Body", new StandardAnalyzer(Lucene.Net.Util.LuceneVersion.LUCENE_48));
- var query = parser.Parse(phrase);
- var topDocs = searcher.Search(query, 100);
- return topDocs.TotalHits;
- }
+ private static int SearchForPhrase(IndexSearcher searcher, string phrase)
+ {
+ var parser = new Lucene.Net.QueryParsers.Classic.QueryParser(Lucene.Net.Util.LuceneVersion.LUCENE_48, "Body", new StandardAnalyzer(Lucene.Net.Util.LuceneVersion.LUCENE_48));
+ var query = parser.Parse(phrase);
+ var topDocs = searcher.Search(query, 100);
+ return topDocs.TotalHits;
+ }
- static Random rand = new Random();
+ static Random rand = new Random();
- static string[] sampleTerms =
- {
+ static string[] sampleTerms =
+ {
"dog","cat","car","horse","door","tree","chair","microsoft","apple","adobe","google","golf","linux","windows","firefox","mouse","hornet","monkey","giraffe","computer","monitor",
"steve","fred","lili","albert","tom","shane","gerald","chris",
"love","hate","scared","fast","slow","new","old"
};
- private static string GeneratePhrase(int MaxTerms)
- {
- var phrase = new StringBuilder();
- int nWords = 2 + rand.Next(MaxTerms);
- for (int i = 0; i < nWords; i++)
- {
- phrase.AppendFormat(" {0} {1}", sampleTerms[rand.Next(sampleTerms.Length)], rand.Next(32768).ToString());
- }
- return phrase.ToString();
- }
+ private static string GeneratePhrase(int MaxTerms)
+ {
+ var phrase = new StringBuilder();
+ int nWords = 2 + rand.Next(MaxTerms);
+ for (int i = 0; i < nWords; i++)
+ {
+ phrase.AppendFormat(" {0} {1}", sampleTerms[rand.Next(sampleTerms.Length)], rand.Next(32768).ToString());
+ }
+ return phrase.ToString();
+ }
- }
+ }
}
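Note on the test above: everything previously derived from `CloudStorageAccount.Parse` now comes straight from the connection string. A minimal sketch of the v12 container check-and-delete the test ends with (connection string and container name mirror the test; illustrative only, not part of the patch):

```csharp
using Azure.Storage.Blobs;

class CleanupSketch
{
    static void Main()
    {
        // v11: CloudStorageAccount.Parse(...).CreateCloudBlobClient().GetContainerReference(...)
        // v12: one client built directly from the connection string
        var blobClient = new BlobServiceClient("UseDevelopmentStorage=true");
        var container = blobClient.GetBlobContainerClient("testcatalog2");

        if (container.Exists())   // Response<bool> converts implicitly to bool
            container.Delete();   // deletes the container and every blob in it
    }
}
```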
diff --git a/AzureDirectory/AzureDirectory.cs b/AzureDirectory/AzureDirectory.cs
index 37feca2..2210904 100644
--- a/AzureDirectory/AzureDirectory.cs
+++ b/AzureDirectory/AzureDirectory.cs
@@ -1,342 +1,316 @@
-using Microsoft.Azure.Storage;
-using Microsoft.Azure.Storage.Blob;
+using Azure;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Specialized;
using System;
using System.Collections.Generic;
-using System.Diagnostics;
using System.IO;
using System.Linq;
namespace Lucene.Net.Store.Azure
{
- public class AzureDirectory : Directory
- {
- private string _containerName;
- private string _rootFolder;
- private CloudBlobClient _blobClient;
- private CloudBlobContainer _blobContainer;
- private Directory _cacheDirectory;
- private LockFactory _lockFactory = new NativeFSLockFactory();
- public override LockFactory LockFactory => _lockFactory;
- public string CacheDirectoryPath { get; set; }
- public string CatalogPath { get; set; }
-
- /// <summary>
- /// Create an AzureDirectory
- /// </summary>
- /// <param name="storageAccount">storage account to use</param>
- /// <param name="containerName">name of container (folder in blob storage)</param>
- /// <param name="cacheDirectory">local Directory object to use for local cache</param>
- /// <param name="rootFolder">path of the root folder inside the container</param>
- public AzureDirectory(
- CloudStorageAccount storageAccount,
- string cacheDirectoryPath,
- string containerName = null,
- //Directory cacheDirectory = null,
- bool compressBlobs = false,
- string rootFolder = null
- )
+ public class AzureDirectory : Directory
{
- CacheDirectoryPath = cacheDirectoryPath;
- if (storageAccount == null)
- throw new ArgumentNullException("storageAccount");
+ private string _containerName;
+ private string _rootFolder;
+ private BlobServiceClient _blobClient;
+ private BlobContainerClient _blobContainer;
+ private Directory _cacheDirectory;
+ private LockFactory _lockFactory = new NativeFSLockFactory();
+ public override LockFactory LockFactory => _lockFactory;
+ public string CacheDirectoryPath { get; set; }
+ public string CatalogPath { get; set; }
+
+        /// <summary>
+        /// Create an AzureDirectory
+        /// </summary>
+        /// <param name="connectionString">storage account connection string to use</param>
+        /// <param name="containerName">name of container (folder in blob storage)</param>
+        /// <param name="compressBlobs">whether to compress the blobs or not</param>
+        /// <param name="rootFolder">path of the root folder inside the container</param>
+ public AzureDirectory(
+ string connectionString,
+ string cacheDirectoryPath,
+ string containerName = null,
+ bool compressBlobs = false,
+ string rootFolder = null
+ )
+ {
+ CacheDirectoryPath = cacheDirectoryPath;
+ if (connectionString == null)
+ throw new ArgumentNullException(nameof(connectionString));
- if (string.IsNullOrEmpty(containerName))
- _containerName = "lucene";
- else
- _containerName = containerName.ToLower();
+ if (string.IsNullOrEmpty(containerName))
+ _containerName = "lucene";
+ else
+ _containerName = containerName.ToLower();
- if (string.IsNullOrEmpty(rootFolder))
- _rootFolder = string.Empty;
- else
- {
- rootFolder = rootFolder.Trim('/');
- _rootFolder = rootFolder + "/";
- }
+ if (string.IsNullOrEmpty(rootFolder))
+ _rootFolder = string.Empty;
+ else
+ {
+ rootFolder = rootFolder.Trim('/');
+ _rootFolder = rootFolder + "/";
+ }
- _blobClient = storageAccount.CreateCloudBlobClient();
- //_initCacheDirectory(cacheDirectory);
- _initCacheDirectory(null);
- this.CompressBlobs = compressBlobs;
- }
+ _blobClient = new BlobServiceClient(connectionString);
+ _initCacheDirectory(null);
+ CompressBlobs = compressBlobs;
+ }
- public CloudBlobContainer BlobContainer
- {
- get
- {
- return _blobContainer;
- }
- }
+ public BlobContainerClient BlobContainer => _blobContainer;
- public bool CompressBlobs
- {
- get;
- set;
- }
+ public bool CompressBlobs { get; set; }
- public void ClearCache()
- {
- foreach (string file in _cacheDirectory.ListAll())
- {
- _cacheDirectory.DeleteFile(file);
- }
- }
-
- public Directory CacheDirectory
- {
- get
- {
- return _cacheDirectory;
- }
- set
- {
- _cacheDirectory = value;
- }
- }
+ public void ClearCache()
+ {
+            foreach (var file in _cacheDirectory.ListAll())
+ _cacheDirectory.DeleteFile(file);
+ }
- private void _initCacheDirectory(Directory cacheDirectory)
- {
- if (cacheDirectory != null)
- {
- // save it off
- _cacheDirectory = cacheDirectory;
- }
- else
- {
- var cachePath = CacheDirectoryPath;
- if (string.IsNullOrEmpty(cachePath))
- Path.Combine(Path.GetPathRoot(Environment.SystemDirectory), "AzureDirectory");
- var azureDir = new DirectoryInfo(cachePath);
- if (!azureDir.Exists)
- azureDir.Create();
-
- CatalogPath = Path.Combine(cachePath, _containerName);
- var catalogDir = new DirectoryInfo(CatalogPath);
- if (!catalogDir.Exists)
- catalogDir.Create();
-
- _cacheDirectory = FSDirectory.Open(CatalogPath);
- }
-
- CreateContainer();
- }
+ public Directory CacheDirectory
+ {
+ get
+ {
+ return _cacheDirectory;
+ }
+ set
+ {
+ _cacheDirectory = value;
+ }
+ }
- public void CreateContainer()
- {
- _blobContainer = _blobClient.GetContainerReference(_containerName);
- _blobContainer.CreateIfNotExistsAsync().Wait();
- }
+ private void _initCacheDirectory(Directory cacheDirectory)
+ {
+ if (cacheDirectory != null)
+ {
+ // save it off
+ _cacheDirectory = cacheDirectory;
+ }
+ else
+ {
+ var cachePath = CacheDirectoryPath;
+ if (string.IsNullOrEmpty(cachePath))
+                    cachePath = Path.Combine(Path.GetPathRoot(Environment.SystemDirectory), "AzureDirectory");
+ var azureDir = new DirectoryInfo(cachePath);
+ if (!azureDir.Exists)
+ azureDir.Create();
+
+ CatalogPath = Path.Combine(cachePath, _containerName);
+ var catalogDir = new DirectoryInfo(CatalogPath);
+ if (!catalogDir.Exists)
+ catalogDir.Create();
+
+ _cacheDirectory = FSDirectory.Open(CatalogPath);
+ }
+
+ CreateContainer();
+ }
- /// Returns an array of strings, one for each file in the directory.
- public override String[] ListAll()
- {
- var results = from blob in _blobContainer.ListBlobs(_rootFolder)
- select blob.Uri.AbsolutePath.Substring(blob.Uri.AbsolutePath.LastIndexOf('/') + 1);
- return results.ToArray();
- }
+ public void CreateContainer()
+ {
+ _blobContainer = _blobClient.GetBlobContainerClient(_containerName);
+ _blobContainer.CreateIfNotExistsAsync().Wait();
+ }
- /// Returns true if a file with the given name exists.
- public override bool FileExists(String name)
- {
- // this always comes from the server
- try
- {
- return _blobContainer.GetBlockBlobReference(_rootFolder + name).Exists();
- }
- catch (Exception)
- {
- return false;
- }
- }
+ /// Returns an array of strings, one for each file in the directory.
+ public override string[] ListAll()
+ => _blobContainer.GetBlobsByHierarchy(prefix: _rootFolder)
+ //.Select(blob => blob.Uri.AbsolutePath.Substring(blob.Uri.AbsolutePath.LastIndexOf('/') + 1))
+ .Select(blob => blob.Blob.Name)
+ .ToArray();
- /// Returns the time the named file was last modified.
- public /*override*/ long FileModified(String name)
- {
- // this always has to come from the server
- try
- {
- var blob = _blobContainer.GetBlockBlobReference(_rootFolder + name);
- blob.FetchAttributes();
- return blob.Properties.LastModified.Value.UtcDateTime.ToFileTimeUtc();
- }
- catch
- {
- return 0;
- }
- }
+ /// Returns true if a file with the given name exists.
+ public override bool FileExists(string name)
+ {
+ // this always comes from the server
+ try
+ {
+ return _blobContainer.GetBlockBlobClient(_rootFolder + name).Exists();
+ }
+ catch (Exception)
+ {
+ return false;
+ }
+ }
- /// Set the modified time of an existing file to now.
- //public override void TouchFile(System.String name)
- //{
- // //BlobProperties props = _blobContainer.GetBlobProperties(_rootFolder + name);
- // //_blobContainer.UpdateBlobMetadata(props);
- // // I have no idea what the semantics of this should be...hmmmm...
- // // we never seem to get called
- // _cacheDirectory.TouchFile(name);
- // //SetCachedBlobProperties(props);
- //}
-
- /// Removes an existing file in the directory.
- public override void DeleteFile(System.String name)
- {
- // We're going to try to remove this from the cache directory first,
- // because the IndexFileDeleter will call this file to remove files
- // but since some files will be in use still, it will retry when a reader/searcher
- // is refreshed until the file is no longer locked. So we need to try to remove
- // from local storage first and if it fails, let it keep throwing the IOExpception
- // since that is what Lucene is expecting in order for it to retry.
- // If we remove the main storage file first, then this will never retry to clean out
- // local storage because the FileExist method will always return false.
- try
- {
- if (_cacheDirectory.FileExists(name + ".blob"))
+ /// Returns the time the named file was last modified.
+ public /*override*/ long FileModified(string name)
{
- _cacheDirectory.DeleteFile(name + ".blob");
+ // this always has to come from the server
+ try
+ {
+ return _blobContainer.GetBlockBlobClient(_rootFolder + name)
+ .GetProperties().Value
+ .LastModified.UtcDateTime.ToFileTimeUtc();
+ }
+ catch
+ {
+ return 0;
+ }
}
- if (_cacheDirectory.FileExists(name))
+ /// Set the modified time of an existing file to now.
+ //public override void TouchFile(System.String name)
+ //{
+ // //BlobProperties props = _blobContainer.GetBlobProperties(_rootFolder + name);
+ // //_blobContainer.UpdateBlobMetadata(props);
+ // // I have no idea what the semantics of this should be...hmmmm...
+ // // we never seem to get called
+ // _cacheDirectory.TouchFile(name);
+ // //SetCachedBlobProperties(props);
+ //}
+
+ /// Removes an existing file in the directory.
+ public override void DeleteFile(string name)
{
- _cacheDirectory.DeleteFile(name);
+ // We're going to try to remove this from the cache directory first,
+ // because the IndexFileDeleter will call this file to remove files
+ // but since some files will be in use still, it will retry when a reader/searcher
+ // is refreshed until the file is no longer locked. So we need to try to remove
+            // from local storage first and if it fails, let it keep throwing the IOException
+ // since that is what Lucene is expecting in order for it to retry.
+ // If we remove the main storage file first, then this will never retry to clean out
+ // local storage because the FileExist method will always return false.
+ try
+ {
+ if (_cacheDirectory.FileExists(name + ".blob"))
+ {
+ _cacheDirectory.DeleteFile(name + ".blob");
+ }
+
+ if (_cacheDirectory.FileExists(name))
+ {
+ _cacheDirectory.DeleteFile(name);
+ }
+ }
+            catch (IOException)
+ {
+ // This will occur because this file is locked, when this is the case, we don't really want to delete it from the master either because
+ // if we do that then this file will never get removed from the cache folder either! This is based on the Deletion Policy which the
+                // IndexFileDeleter uses. We could implement our own one of those to deal with this scenario too but it seems the easiest way is to just
+ // let this throw so Lucene will retry when it can and when that is successful we'll also clear it from the master
+ throw;
+ }
+
+            // if we've made it this far then the cache directory file has been successfully removed so now we'll do the master
+
+ var blob = _blobContainer.GetBlockBlobClient(_rootFolder + name);
+ blob.DeleteIfExistsAsync().Wait();
}
- }
- catch (IOException ex)
- {
- // This will occur because this file is locked, when this is the case, we don't really want to delete it from the master either because
- // if we do that then this file will never get removed from the cache folder either! This is based on the Deletion Policy which the
- // IndexFileDeleter uses. We could implement our own one of those to deal with this scenario too but it seems the easiest way it to just
- // let this throw so Lucene will retry when it can and when that is successful we'll also clear it from the master
- throw;
- }
-
- //if we've made it this far then the cache directly file has been successfully removed so now we'll do the master
-
- var blob = _blobContainer.GetBlockBlobReference(_rootFolder + name);
- blob.DeleteIfExistsAsync().Wait();
- }
- /// Returns the length of a file in the directory.
- public override long FileLength(String name)
- {
- var blob = _blobContainer.GetBlockBlobReference(_rootFolder + name);
- blob.FetchAttributes();
-
- // index files may be compressed so the actual length is stored in metatdata
- string blobLegthMetadata;
- bool hasMetadataValue = blob.Metadata.TryGetValue("CachedLength", out blobLegthMetadata);
-
- long blobLength;
- if (hasMetadataValue && long.TryParse(blobLegthMetadata, out blobLength))
- {
- return blobLength;
- }
- return blob.Properties.Length; // fall back to actual blob size
- }
+ /// Returns the length of a file in the directory.
+ public override long FileLength(string name)
+ {
+ var blob = _blobContainer.GetBlockBlobClient(_rootFolder + name);
+
+            // index files may be compressed so the actual length is stored in metadata
+            string blobLengthMetadata;
+            var properties = blob.GetProperties().Value;
+            bool hasMetadataValue = properties.Metadata.TryGetValue("CachedLength", out blobLengthMetadata);
+
+            long blobLength;
+            if (hasMetadataValue && long.TryParse(blobLengthMetadata, out blobLength))
+ {
+ return blobLength;
+ }
+ return properties.ContentLength; // fall back to actual blob size
+ }
- /// <summary>Creates a new, empty file in the directory with the given name.
- /// Returns a stream writing this file.
- /// </summary>
- public override IndexOutput CreateOutput(System.String name, IOContext context)
- {
- var blob = _blobContainer.GetBlockBlobReference(_rootFolder + name);
- return new AzureIndexOutput(this, blob);
- }
+        /// <summary>Creates a new, empty file in the directory with the given name.
+        /// Returns a stream writing this file.
+        /// </summary>
+ public override IndexOutput CreateOutput(string name, IOContext context)
+ {
+ var blob = _blobContainer.GetBlockBlobClient(_rootFolder + name);
+ return new AzureIndexOutput(this, blob);
+ }
- /// Returns a stream reading an existing file.
- public override IndexInput OpenInput(System.String name, IOContext context)
- {
- try
- {
- var blob = _blobContainer.GetBlockBlobReference(_rootFolder + name);
- blob.FetchAttributes();
- return new AzureIndexInput(this, blob, "azureDirectory");
- }
- catch (Exception err)
- {
- throw new FileNotFoundException(name, err);
- }
- }
+ /// Returns a stream reading an existing file.
+ public override IndexInput OpenInput(string name, IOContext context)
+ {
+ try
+ {
+ var blob = _blobContainer.GetBlobClient(_rootFolder + name);
+ return new AzureIndexInput(this, blob, "azureDirectory");
+ }
+ catch (Exception err)
+ {
+ throw new FileNotFoundException(name, err);
+ }
+ }
- private Dictionary<string, AzureLock> _locks = new Dictionary<string, AzureLock>();
+        private Dictionary<string, AzureLock> _locks = new Dictionary<string, AzureLock>();
- /// <summary>Construct a {@link Lock}.</summary>
- /// <param name="name">the name of the lock file</param>
- ///
- public override Lock MakeLock(System.String name)
- {
- lock (_locks)
- {
- if (!_locks.ContainsKey(name))
+            /// <summary>Construct a {@link Lock}.</summary>
+            /// <param name="name">the name of the lock file</param>
+            ///
+ public override Lock MakeLock(string name)
{
- _locks.Add(name, new AzureLock(_rootFolder + name, this));
+ lock (_locks)
+ {
+ if (!_locks.ContainsKey(name))
+ {
+ _locks.Add(name, new AzureLock(_rootFolder + name, this));
+ }
+ return _locks[name];
+ }
}
- return _locks[name];
- }
- }
- public override void ClearLock(string name)
- {
- lock (_locks)
- {
- if (_locks.ContainsKey(name))
+ public override void ClearLock(string name)
{
- _locks[name].BreakLock();
+ lock (_locks)
+ {
+ if (_locks.ContainsKey(name))
+ {
+ _locks[name].BreakLock();
+ }
+ }
+ _cacheDirectory.ClearLock(name);
}
- }
- _cacheDirectory.ClearLock(name);
- }
- /// Closes the store.
- protected override void Dispose(bool disposing)
- {
- _blobContainer = null;
- _blobClient = null;
- }
+ /// Closes the store.
+ protected override void Dispose(bool disposing)
+ {
+ _blobContainer = null;
+ _blobClient = null;
+ }
- public virtual bool ShouldCompressFile(string path)
- {
- if (!CompressBlobs)
- return false;
-
- var ext = System.IO.Path.GetExtension(path);
- switch (ext)
- {
- case ".cfs":
- case ".fdt":
- case ".fdx":
- case ".frq":
- case ".tis":
- case ".tii":
- case ".nrm":
- case ".tvx":
- case ".tvd":
- case ".tvf":
- case ".prx":
- return true;
- default:
- return false;
- };
- }
- public StreamInput OpenCachedInputAsStream(string name)
- {
- return new StreamInput(CacheDirectory.OpenInput(name, IOContext.DEFAULT));
- }
+ public virtual bool ShouldCompressFile(string path)
+ {
+ if (!CompressBlobs)
+ return false;
+
+ var ext = Path.GetExtension(path);
+ switch (ext)
+ {
+ case ".cfs":
+ case ".fdt":
+ case ".fdx":
+ case ".frq":
+ case ".tis":
+ case ".tii":
+ case ".nrm":
+ case ".tvx":
+ case ".tvd":
+ case ".tvf":
+ case ".prx":
+ return true;
+ default:
+ return false;
+ };
+ }
- public StreamOutput CreateCachedOutputAsStream(string name)
- {
- return new StreamOutput(CacheDirectory.CreateOutput(name, IOContext.DEFAULT));
- }
+ public StreamInput OpenCachedInputAsStream(string name) => new StreamInput(CacheDirectory.OpenInput(name, IOContext.DEFAULT));
+ public StreamOutput CreateCachedOutputAsStream(string name) => new StreamOutput(CacheDirectory.CreateOutput(name, IOContext.DEFAULT));
- public override void Sync(ICollection<string> names)
- {
- //throw new NotImplementedException();
- }
+        public override void Sync(ICollection<string> names)
+ {
+ //throw new NotImplementedException();
+ }
- public override void SetLockFactory(LockFactory lockFactory)
- {
- _lockFactory = lockFactory;
+ public override void SetLockFactory(LockFactory lockFactory) => _lockFactory = lockFactory;
}
- }
-
-}
+}
\ No newline at end of file
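Client-type mapping used throughout this file: `CloudBlobClient` becomes `BlobServiceClient`, `CloudBlobContainer` becomes `BlobContainerClient`, and `GetBlockBlobReference` becomes the `GetBlockBlobClient` extension from `Azure.Storage.Blobs.Specialized`. A hedged sketch of the `FileLength` lookup above, showing how one `GetProperties()` call replaces v11's `FetchAttributes()` side effect (method name is illustrative):

```csharp
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Specialized;

static class FileLengthSketch
{
    public static long GetLength(BlobContainerClient container, string blobName)
    {
        var blob = container.GetBlockBlobClient(blobName);

        // metadata and content length arrive on one response in v12
        var props = blob.GetProperties().Value;

        // compressed index files record their uncompressed size in metadata
        if (props.Metadata.TryGetValue("CachedLength", out var cached) &&
            long.TryParse(cached, out var length))
            return length;

        return props.ContentLength; // fall back to the actual blob size
    }
}
```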
diff --git a/AzureDirectory/AzureDirectory.csproj b/AzureDirectory/AzureDirectory.csproj
index f89a4ac..547cfdb 100644
--- a/AzureDirectory/AzureDirectory.csproj
+++ b/AzureDirectory/AzureDirectory.csproj
@@ -2,11 +2,14 @@
     <TargetFramework>netstandard2.0</TargetFramework>
+    <GeneratePackageOnBuild>true</GeneratePackageOnBuild>
+    <Version>1.0.1</Version>
+    <PackageReleaseNotes>v1.0.1 - Updated to Azure.Storage.Blobs v12.8.0</PackageReleaseNotes>
-
-
+    <PackageReference Include="Azure.Storage.Blobs" Version="12.8.0" />
+    <PackageReference Include="Lucene.Net" Version="4.8.0-beta00012" />
diff --git a/AzureDirectory/AzureIndexInput.cs b/AzureDirectory/AzureIndexInput.cs
index b0636a6..f78b637 100644
--- a/AzureDirectory/AzureIndexInput.cs
+++ b/AzureDirectory/AzureIndexInput.cs
@@ -1,4 +1,4 @@
-using Microsoft.Azure.Storage.Blob;
+using Azure.Storage.Blobs;
using System;
using System.Diagnostics;
using System.IO;
@@ -7,260 +7,256 @@
namespace Lucene.Net.Store.Azure
{
- /// <summary>
- /// Implements IndexInput semantics for a read only blob
- /// </summary>
- public class AzureIndexInput : IndexInput
- {
- private AzureDirectory _azureDirectory;
- private CloudBlobContainer _blobContainer;
- private ICloudBlob _blob;
- private string _name;
+    /// <summary>
+    /// Implements IndexInput semantics for a read only blob
+    /// </summary>
+ public class AzureIndexInput : IndexInput
+ {
+ private AzureDirectory _azureDirectory;
+ private BlobContainerClient _blobContainer;
+ private BlobClient _blob;
+ private string _name;
- private IndexInput _indexInput;
- private Mutex _fileMutex;
+ private IndexInput _indexInput;
+ private Mutex _fileMutex;
- public Lucene.Net.Store.Directory CacheDirectory { get { return _azureDirectory.CacheDirectory; } }
+ public Directory CacheDirectory { get { return _azureDirectory.CacheDirectory; } }
- public AzureIndexInput(AzureDirectory azuredirectory, ICloudBlob blob, string resourceDescription) : base(resourceDescription)
- {
- _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
+ public AzureIndexInput(AzureDirectory azuredirectory, BlobClient blob, string resourceDescription) : base(resourceDescription)
+ {
+ _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
#if FULLDEBUG
Debug.WriteLine(String.Format("opening {0} ", _name));
#endif
- _fileMutex = BlobMutexManager.GrabMutex(_name);
- _fileMutex.WaitOne();
- try
- {
- _azureDirectory = azuredirectory;
- _blobContainer = azuredirectory.BlobContainer;
- _blob = blob;
+ _fileMutex = BlobMutexManager.GrabMutex(_name);
+ _fileMutex.WaitOne();
+ try
+ {
+ _azureDirectory = azuredirectory;
+ _blobContainer = azuredirectory.BlobContainer;
+ _blob = blob;
- var fileName = _name;
+ var fileName = _name;
- var fFileNeeded = false;
- if (!CacheDirectory.FileExists(fileName))
- {
- fFileNeeded = true;
- }
- else
- {
- long cachedLength = CacheDirectory.FileLength(fileName);
- string blobLengthMetadata;
- bool hasMetadataValue = blob.Metadata.TryGetValue("CachedLength", out blobLengthMetadata);
- long blobLength = blob.Properties.Length;
- if (hasMetadataValue) long.TryParse(blobLengthMetadata, out blobLength);
+ var fFileNeeded = false;
+ if (!CacheDirectory.FileExists(fileName))
+ {
+ fFileNeeded = true;
+ }
+ else
+ {
+ long cachedLength = CacheDirectory.FileLength(fileName);
+ string blobLengthMetadata;
+ var properties = blob.GetProperties().Value;
+ bool hasMetadataValue = properties.Metadata.TryGetValue("CachedLength", out blobLengthMetadata);
+ long blobLength = properties.ContentLength;
+ if (hasMetadataValue) long.TryParse(blobLengthMetadata, out blobLength);
- string blobLastModifiedMetadata;
- long longLastModified = 0;
- DateTime blobLastModifiedUTC = blob.Properties.LastModified.Value.UtcDateTime;
- if (blob.Metadata.TryGetValue("CachedLastModified", out blobLastModifiedMetadata))
- {
- if (long.TryParse(blobLastModifiedMetadata, out longLastModified))
- blobLastModifiedUTC = DateTime.FromFileTimeUtc(longLastModified);
- //blobLastModifiedUTC = new DateTime(longLastModified).ToUniversalTime();
- }
+ string blobLastModifiedMetadata;
+ long longLastModified = 0;
+ DateTime blobLastModifiedUTC = properties.LastModified.UtcDateTime;
+ if (properties.Metadata.TryGetValue("CachedLastModified", out blobLastModifiedMetadata))
+ {
+ if (long.TryParse(blobLastModifiedMetadata, out longLastModified))
+ blobLastModifiedUTC = DateTime.FromFileTimeUtc(longLastModified);
+ //blobLastModifiedUTC = new DateTime(longLastModified).ToUniversalTime();
+ }
- if (cachedLength != blobLength)
- fFileNeeded = true;
- else
- {
+ if (cachedLength != blobLength)
+ fFileNeeded = true;
+ else
+ {
- // cachedLastModifiedUTC was not ouputting with a date (just time) and the time was always off
- var filePath = Path.Combine(_azureDirectory.CatalogPath, fileName);
- var lastModified = File.GetLastWriteTimeUtc(filePath);
- //long unixDate = CacheDirectory.FileModified(fileName);
- //DateTime start = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
- //var cachedLastModifiedUTC = start.AddMilliseconds(unixDate).ToUniversalTime();
- var cachedLastModifiedUTC = lastModified;
+                            // cachedLastModifiedUTC was not outputting with a date (just time) and the time was always off
+ var filePath = Path.Combine(_azureDirectory.CatalogPath, fileName);
+ var lastModified = File.GetLastWriteTimeUtc(filePath);
+ //long unixDate = CacheDirectory.FileModified(fileName);
+ //DateTime start = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
+ //var cachedLastModifiedUTC = start.AddMilliseconds(unixDate).ToUniversalTime();
+ var cachedLastModifiedUTC = lastModified;
- if (cachedLastModifiedUTC != blobLastModifiedUTC)
- {
- var timeSpan = blobLastModifiedUTC.Subtract(cachedLastModifiedUTC);
- if (timeSpan.TotalSeconds > 1)
- fFileNeeded = true;
- else
- {
+ if (cachedLastModifiedUTC != blobLastModifiedUTC)
+ {
+ var timeSpan = blobLastModifiedUTC.Subtract(cachedLastModifiedUTC);
+ if (timeSpan.TotalSeconds > 1)
+ fFileNeeded = true;
+ else
+ {
#if FULLDEBUG
Debug.WriteLine(timeSpan.TotalSeconds);
#endif
- // file not needed
- }
- }
- }
- }
+ // file not needed
+ }
+ }
+ }
+ }
- // if the file does not exist
- // or if it exists and it is older then the lastmodified time in the blobproperties (which always comes from the blob storage)
- if (fFileNeeded)
- {
- if (_azureDirectory.ShouldCompressFile(_name))
- {
- InflateStream(fileName);
- }
- else
- {
- using (var fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
- {
- // get the blob
- _blob.DownloadToStream(fileStream);
+ // if the file does not exist
+                    // or if it exists and it is older than the lastModified time in the blob properties (which always comes from blob storage)
+ if (fFileNeeded)
+ {
+ if (_azureDirectory.ShouldCompressFile(_name))
+ {
+ InflateStream(fileName);
+ }
+ else
+ {
+ using (var fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
+ {
+ // get the blob
+ _blob.DownloadTo(fileStream);
- fileStream.Flush();
- Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, fileStream.Length));
- }
- }
+ fileStream.Flush();
+ Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, fileStream.Length));
+ }
+ }
- // and open it as an input
- _indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT);
- }
- else
- {
+ // and open it as an input
+ _indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT);
+ }
+ else
+ {
#if FULLDEBUG
Debug.WriteLine(String.Format("Using cached file for {0}", _name));
#endif
- // open the file in read only mode
- _indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT);
+ // open the file in read only mode
+ _indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT);
+ }
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
}
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- }
- private void InflateStream(string fileName)
- {
- // then we will get it fresh into local deflatedName
- // StreamOutput deflatedStream = new StreamOutput(CacheDirectory.CreateOutput(deflatedName));
- using (var deflatedStream = new MemoryStream())
- {
- // get the deflated blob
- _blob.DownloadToStream(deflatedStream);
+ private void InflateStream(string fileName)
+ {
+ // then we will get it fresh into local deflatedName
+ // StreamOutput deflatedStream = new StreamOutput(CacheDirectory.CreateOutput(deflatedName));
+ using (var deflatedStream = new MemoryStream())
+ {
+ // get the deflated blob
+ _blob.DownloadTo(deflatedStream);
- Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, deflatedStream.Length));
+ Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, deflatedStream.Length));
- // seek back to begininng
- deflatedStream.Seek(0, SeekOrigin.Begin);
+ // seek back to begininng
+ deflatedStream.Seek(0, SeekOrigin.Begin);
- // open output file for uncompressed contents
- using (var fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
- using (var decompressor = new DeflateStream(deflatedStream, CompressionMode.Decompress))
- {
- var bytes = new byte[65535];
- var nRead = 0;
- do
- {
- nRead = decompressor.Read(bytes, 0, 65535);
- if (nRead > 0)
- fileStream.Write(bytes, 0, nRead);
- } while (nRead == 65535);
+ // open output file for uncompressed contents
+ using (var fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
+ using (var decompressor = new DeflateStream(deflatedStream, CompressionMode.Decompress))
+ {
+ var bytes = new byte[65535];
+ var nRead = 0;
+ do
+ {
+ nRead = decompressor.Read(bytes, 0, 65535);
+ if (nRead > 0)
+ fileStream.Write(bytes, 0, nRead);
+ } while (nRead == 65535);
+ }
+ }
}
- }
- }
- public AzureIndexInput(AzureIndexInput cloneInput, string resourceDescription) : base(resourceDescription)
- {
- _fileMutex = BlobMutexManager.GrabMutex(cloneInput._name);
- _fileMutex.WaitOne();
+ public AzureIndexInput(AzureIndexInput cloneInput, string resourceDescription) : base(resourceDescription)
+ {
+ _fileMutex = BlobMutexManager.GrabMutex(cloneInput._name);
+ _fileMutex.WaitOne();
- try
- {
+ try
+ {
#if FULLDEBUG
Debug.WriteLine(String.Format("Creating clone for {0}", cloneInput._name));
#endif
- _azureDirectory = cloneInput._azureDirectory;
- _blobContainer = cloneInput._blobContainer;
- _blob = cloneInput._blob;
- _indexInput = cloneInput._indexInput.Clone() as IndexInput;
- }
- catch (Exception)
- {
- // sometimes we get access denied on the 2nd stream...but not always. I haven't tracked it down yet
- // but this covers our tail until I do
- Debug.WriteLine(String.Format("Dagnabbit, falling back to memory clone for {0}", cloneInput._name));
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- }
+ _azureDirectory = cloneInput._azureDirectory;
+ _blobContainer = cloneInput._blobContainer;
+ _blob = cloneInput._blob;
+ _indexInput = cloneInput._indexInput.Clone() as IndexInput;
+ }
+ catch (Exception)
+ {
+ // sometimes we get access denied on the 2nd stream...but not always. I haven't tracked it down yet
+ // but this covers our tail until I do
+ Debug.WriteLine(string.Format("Dagnabbit, falling back to memory clone for {0}", cloneInput._name));
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
+ }
- public override byte ReadByte()
- {
- return _indexInput.ReadByte();
- }
+ public override byte ReadByte()
+ {
+ return _indexInput.ReadByte();
+ }
- public override void ReadBytes(byte[] b, int offset, int len)
- {
- _indexInput.ReadBytes(b, offset, len);
- }
+ public override void ReadBytes(byte[] b, int offset, int len)
+ {
+ _indexInput.ReadBytes(b, offset, len);
+ }
- //public override long FilePointer
- //{
- // get
- // {
- // return _indexInput.FilePointer;
- // }
- //}
+ //public override long FilePointer
+ //{
+ // get
+ // {
+ // return _indexInput.FilePointer;
+ // }
+ //}
- public override void Seek(long pos)
- {
- _indexInput.Seek(pos);
- }
+ public override void Seek(long pos)
+ {
+ _indexInput.Seek(pos);
+ }
- protected override void Dispose(bool disposing)
- {
- _fileMutex.WaitOne();
- try
- {
+ protected override void Dispose(bool disposing)
+ {
+ _fileMutex.WaitOne();
+ try
+ {
#if FULLDEBUG
Debug.WriteLine(String.Format("CLOSED READSTREAM local {0}", _name));
#endif
- _indexInput.Dispose();
- _indexInput = null;
- _azureDirectory = null;
- _blobContainer = null;
- _blob = null;
- GC.SuppressFinalize(this);
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- }
+ _indexInput.Dispose();
+ _indexInput = null;
+ _azureDirectory = null;
+ _blobContainer = null;
+ _blob = null;
+ GC.SuppressFinalize(this);
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
+ }
- //public override long Length()
- //{
- // return _indexInput.Length();
- //}
- public override long Length => _indexInput.Length;
+ public override long Length => _indexInput.Length;
- public override System.Object Clone()
- {
- IndexInput clone = null;
- try
- {
- _fileMutex.WaitOne();
- AzureIndexInput input = new AzureIndexInput(this, "clone");
- clone = (IndexInput)input;
- }
- catch (System.Exception err)
- {
- Debug.WriteLine(err.ToString());
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- Debug.Assert(clone != null);
- return clone;
- }
+ public override object Clone()
+ {
+ IndexInput clone = null;
+ try
+ {
+ _fileMutex.WaitOne();
+ clone = new AzureIndexInput(this, "clone");
+ }
+ catch (Exception err)
+ {
+ Debug.WriteLine(err.ToString());
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
+ Debug.Assert(clone != null);
+ return clone;
+ }
- public override long GetFilePointer()
- {
- return _indexInput.GetFilePointer();
+ public override long GetFilePointer()
+ {
+ return _indexInput.GetFilePointer();
+ }
}
- }
}
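The download path above swaps v11's `DownloadToStream` for v12's synchronous `DownloadTo`. A sketch of the decompression branch, assuming the blob was written through `DeflateStream` as `AzureIndexOutput` does (names are illustrative):

```csharp
using System.IO;
using System.IO.Compression;
using Azure.Storage.Blobs;

static class InflateSketch
{
    public static void DownloadAndInflate(BlobClient blob, Stream target)
    {
        using var compressed = new MemoryStream();
        blob.DownloadTo(compressed);            // v12 synchronous download
        compressed.Seek(0, SeekOrigin.Begin);   // rewind before reading back

        using var inflater = new DeflateStream(compressed, CompressionMode.Decompress);
        inflater.CopyTo(target);                // replaces the manual 64 KB read loop
    }
}
```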
diff --git a/AzureDirectory/AzureIndexOutput.cs b/AzureDirectory/AzureIndexOutput.cs
index c1ba5be..0495dff 100644
--- a/AzureDirectory/AzureIndexOutput.cs
+++ b/AzureDirectory/AzureIndexOutput.cs
@@ -1,196 +1,158 @@
-using Lucene.Net.Support;
-using Microsoft.Azure.Storage.Blob;
-//using Microsoft.WindowsAzure.Storage.Blob;
+//using Lucene.Net.Support;
using System;
using System.Diagnostics;
using System.IO;
using System.IO.Compression;
using System.Threading;
-
+//using Azure.Storage.Blobs;
+using System.Collections.Generic;
+using Azure.Storage.Blobs.Specialized;
namespace Lucene.Net.Store.Azure
{
- /// <summary>
- /// Implements IndexOutput semantics for a write/append only file
- /// </summary>
- public class AzureIndexOutput : IndexOutput
- {
- private AzureDirectory _azureDirectory;
- private CloudBlobContainer _blobContainer;
- private string _name;
- private IndexOutput _indexOutput;
- private Mutex _fileMutex;
- private ICloudBlob _blob;
- private readonly CRC32 _crc;
- public Lucene.Net.Store.Directory CacheDirectory { get { return _azureDirectory.CacheDirectory; } }
-
- public AzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
- {
- _crc = new CRC32();
-
- _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
-
- _fileMutex = BlobMutexManager.GrabMutex(_name);
- _fileMutex.WaitOne();
- try
- {
- _azureDirectory = azureDirectory;
- _blobContainer = _azureDirectory.BlobContainer;
- _blob = blob;
-
- // create the local cache one we will operate against...
- _indexOutput = CacheDirectory.CreateOutput(_name, IOContext.DEFAULT);
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- }
-
- public override void Flush()
+    /// <summary>
+    /// Implements IndexOutput semantics for a write/append only file
+    /// </summary>
+ public class AzureIndexOutput : IndexOutput
{
- _indexOutput.Flush();
- }
-
- protected override void Dispose(bool disposing)
- {
- _fileMutex.WaitOne();
- try
- {
- string fileName = _name;
-
- // make sure it's all written out
- _indexOutput.Flush();
-
- long originalLength = _indexOutput.Length;
- _indexOutput.Dispose();
-
- Stream blobStream;
-
- // optionally put a compressor around the blob stream
- if (_azureDirectory.ShouldCompressFile(_name))
+ private AzureDirectory _azureDirectory;
+ private string _name;
+ private IndexOutput _indexOutput;
+ private Mutex _fileMutex;
+ private BlockBlobClient _blob;
+ public Directory CacheDirectory { get { return _azureDirectory.CacheDirectory; } }
+
+ public AzureIndexOutput(AzureDirectory azureDirectory, BlockBlobClient blob)
{
- blobStream = CompressStream(fileName, originalLength);
- }
- else
- {
- blobStream = new StreamInput(CacheDirectory.OpenInput(fileName, IOContext.DEFAULT));
+ _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
+
+ _fileMutex = BlobMutexManager.GrabMutex(_name);
+ _fileMutex.WaitOne();
+ try
+ {
+ _azureDirectory = azureDirectory;
+ _blob = blob;
+
+ // create the local cache one we will operate against...
+ _indexOutput = CacheDirectory.CreateOutput(_name, IOContext.DEFAULT);
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
}
- try
+ public override void Flush()
{
- // push the blobStream up to the cloud
- _blob.UploadFromStream(blobStream);
-
- // set the metadata with the original index file properties
- _blob.Metadata["CachedLength"] = originalLength.ToString();
-
- var filePath = Path.Combine(_azureDirectory.CatalogPath, fileName);
- var lastModified = File.GetLastWriteTimeUtc(filePath);
- long fileTimeUtc = lastModified.ToFileTimeUtc();
-
- //_blob.Metadata["CachedLastModified"] = CacheDirectory.FileModified(fileName).ToString();
- _blob.Metadata["CachedLastModified"] = fileTimeUtc.ToString();
- _blob.SetMetadata();
-
- Debug.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
+ _indexOutput.Flush();
}
- finally
+
+ protected override void Dispose(bool disposing)
{
- blobStream.Dispose();
- }
+ _fileMutex.WaitOne();
+ try
+ {
+ string fileName = _name;
+
+ // make sure it's all written out
+ _indexOutput.Flush();
+
+ long originalLength = _indexOutput.Length;
+ _indexOutput.Dispose();
+
+ Stream blobStream;
+
+ // optionally put a compressor around the blob stream
+ if (_azureDirectory.ShouldCompressFile(_name))
+ {
+ blobStream = CompressStream(fileName, originalLength);
+ }
+ else
+ {
+ blobStream = new StreamInput(CacheDirectory.OpenInput(fileName, IOContext.DEFAULT));
+ }
+
+ try
+ {
+ // push the blobStream up to the cloud
+ _blob.Upload(blobStream);
+
+ // set the metadata with the original index file properties
+ //_blob.Metadata["CachedLength"] = originalLength.ToString();
+                    var metadataUpdates = new Dictionary<string, string>();
+ metadataUpdates.Add("CachedLength", originalLength.ToString());
+
+ var filePath = Path.Combine(_azureDirectory.CatalogPath, fileName);
+ var lastModified = File.GetLastWriteTimeUtc(filePath);
+ long fileTimeUtc = lastModified.ToFileTimeUtc();
+
+ //_blob.Metadata["CachedLastModified"] = CacheDirectory.FileModified(fileName).ToString();
+ metadataUpdates.Add("CachedLastModified", fileTimeUtc.ToString());
+ _blob.SetMetadata(metadataUpdates);
+
+ Debug.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
+ }
+ finally
+ {
+ blobStream.Dispose();
+ }
#if FULLDEBUG
Debug.WriteLine(string.Format("CLOSED WRITESTREAM {0}", _name));
#endif
- // clean up
- _indexOutput = null;
- _blobContainer = null;
- _blob = null;
- GC.SuppressFinalize(this);
- }
- finally
- {
- _fileMutex.ReleaseMutex();
- }
- }
+ // clean up
+ _indexOutput = null;
+ _blob = null;
+ GC.SuppressFinalize(this);
+ }
+ finally
+ {
+ _fileMutex.ReleaseMutex();
+ }
+ }
- private MemoryStream CompressStream(string fileName, long originalLength)
- {
- // unfortunately, deflate stream doesn't allow seek, and we need a seekable stream
- // to pass to the blob storage stuff, so we compress into a memory stream
- MemoryStream compressedStream = new MemoryStream();
-
- try
- {
- using (var indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT))
- using (var compressor = new DeflateStream(compressedStream, CompressionMode.Compress, true))
+ private MemoryStream CompressStream(string fileName, long originalLength)
{
- // compress to compressedOutputStream
- byte[] bytes = new byte[indexInput.Length];
- indexInput.ReadBytes(bytes, 0, (int)bytes.Length);
- compressor.Write(bytes, 0, (int)bytes.Length);
+ // unfortunately, deflate stream doesn't allow seek, and we need a seekable stream
+ // to pass to the blob storage stuff, so we compress into a memory stream
+ MemoryStream compressedStream = new MemoryStream();
+
+ try
+ {
+ using (var indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT))
+ using (var compressor = new DeflateStream(compressedStream, CompressionMode.Compress, true))
+ {
+ // compress to compressedOutputStream
+ byte[] bytes = new byte[indexInput.Length];
+ indexInput.ReadBytes(bytes, 0, bytes.Length);
+ compressor.Write(bytes, 0, bytes.Length);
+ }
+
+                    // seek back to beginning of compressed stream
+ compressedStream.Seek(0, SeekOrigin.Begin);
+
+ Debug.WriteLine(string.Format("COMPRESSED {0} -> {1} {2}% to {3}",
+ originalLength,
+ compressedStream.Length,
+ ((float)compressedStream.Length / (float)originalLength) * 100,
+ _name));
+ }
+ catch
+ {
+ // release the compressed stream resources if an error occurs
+ compressedStream.Dispose();
+ throw;
+ }
+ return compressedStream;
}
- // seek back to beginning of comrpessed stream
- compressedStream.Seek(0, SeekOrigin.Begin);
-
- Debug.WriteLine(string.Format("COMPRESSED {0} -> {1} {2}% to {3}",
- originalLength,
- compressedStream.Length,
- ((float)compressedStream.Length / (float)originalLength) * 100,
- _name));
- }
- catch
- {
- // release the compressed stream resources if an error occurs
- compressedStream.Dispose();
- throw;
- }
- return compressedStream;
- }
-
- public override long Length
- {
- get
- {
- return _indexOutput.Length;
- }
- }
-
- public override void WriteByte(byte b)
- {
- _indexOutput.WriteByte(b);
- }
-
- public override void WriteBytes(byte[] b, int length)
- {
- _indexOutput.WriteBytes(b, length);
- }
-
- public override void WriteBytes(byte[] b, int offset, int length)
- {
- _indexOutput.WriteBytes(b, offset, length);
- }
-
- public /*override*/ long FilePointer
- {
- get
- {
- return _indexOutput.GetFilePointer();
- }
- }
-
- public override long Checksum => _indexOutput.Checksum;
-
- public override void Seek(long pos)
- {
- _indexOutput.Seek(pos);
- }
-
- public override long GetFilePointer()
- {
- return _indexOutput.GetFilePointer();
+ public override long Length => _indexOutput.Length;
+ public override void WriteByte(byte b) => _indexOutput.WriteByte(b);
+ public override void WriteBytes(byte[] b, int length) => _indexOutput.WriteBytes(b, length);
+ public override void WriteBytes(byte[] b, int offset, int length) => _indexOutput.WriteBytes(b, offset, length);
+ public /*override*/ long FilePointer => _indexOutput.GetFilePointer();
+ public override long Checksum => _indexOutput.Checksum;
+ public override void Seek(long pos) => _indexOutput.Seek(pos);
+ public override long GetFilePointer() => _indexOutput.GetFilePointer();
}
- }
}
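One behavioral note on the metadata change above: v11 let callers mutate `blob.Metadata` in place before calling `SetMetadata()`, while v12's `SetMetadata(dictionary)` replaces the blob's metadata wholesale, so both keys must be sent in one call, as the code now does. A sketch (names are illustrative):

```csharp
using System.Collections.Generic;
using Azure.Storage.Blobs.Specialized;

static class MetadataSketch
{
    public static void StampCacheMetadata(BlockBlobClient blob, long originalLength, long lastWriteFileTimeUtc)
    {
        var metadata = new Dictionary<string, string>
        {
            ["CachedLength"] = originalLength.ToString(),
            ["CachedLastModified"] = lastWriteFileTimeUtc.ToString()
        };

        blob.SetMetadata(metadata); // replaces all existing metadata on the blob
    }
}
```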
diff --git a/AzureDirectory/AzureLock.cs b/AzureDirectory/AzureLock.cs
index e129ed1..3861e2c 100644
--- a/AzureDirectory/AzureLock.cs
+++ b/AzureDirectory/AzureLock.cs
@@ -1,7 +1,8 @@
//using Microsoft.WindowsAzure.Storage;
//using Microsoft.WindowsAzure.Storage.Blob;
-using Microsoft.Azure.Storage;
-using Microsoft.Azure.Storage.Blob;
+using Azure;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Specialized;
using System;
using System.Diagnostics;
using System.IO;
@@ -9,158 +10,154 @@
namespace Lucene.Net.Store.Azure
{
- /// <summary>
- /// Implements lock semantics on AzureDirectory via a blob lease
- /// </summary>
- public class AzureLock : Lock
- {
- private string _lockFile;
- private AzureDirectory _azureDirectory;
- private string _leaseid;
-
- public AzureLock(string lockFile, AzureDirectory directory)
+    /// <summary>
+    /// Implements lock semantics on AzureDirectory via a blob lease
+    /// </summary>
+ public class AzureLock : Lock
{
- _lockFile = lockFile;
- _azureDirectory = directory;
- }
+ private string _lockFile;
+ private AzureDirectory _azureDirectory;
+ private string _leaseid;
- #region Lock methods
- override public bool IsLocked()
- {
- var blob = _azureDirectory.BlobContainer.GetBlockBlobReference(_lockFile);
- try
- {
- Debug.Print("IsLocked() : {0}", _leaseid);
- if (String.IsNullOrEmpty(_leaseid))
+ public AzureLock(string lockFile, AzureDirectory directory)
{
- var tempLease = blob.AcquireLease(TimeSpan.FromSeconds(60), _leaseid);
- if (String.IsNullOrEmpty(tempLease))
- {
- Debug.Print("IsLocked() : TRUE");
- return true;
- }
- blob.ReleaseLease(new AccessCondition() { LeaseId = tempLease });
+ _lockFile = lockFile;
+ _azureDirectory = directory;
}
- Debug.Print("IsLocked() : {0}", _leaseid);
- return !string.IsNullOrEmpty(_leaseid);
- }
- catch (StorageException webErr)
- {
- if (_handleWebException(blob, webErr))
- return IsLocked();
- }
- _leaseid = null;
- return false;
- }
- public override bool Obtain()
- {
- var blob = _azureDirectory.BlobContainer.GetBlockBlobReference(_lockFile);
- try
- {
- Debug.Print("AzureLock:Obtain({0}) : {1}", _lockFile, _leaseid);
- if (String.IsNullOrEmpty(_leaseid))
+ #region Lock methods
+ override public bool IsLocked()
{
- _leaseid = blob.AcquireLease(TimeSpan.FromSeconds(60), _leaseid);
- Debug.Print("AzureLock:Obtain({0}): AcquireLease : {1}", _lockFile, _leaseid);
+ var blob = _azureDirectory.BlobContainer.GetBlockBlobClient(_lockFile);
+ try
+ {
+ Debug.Print("IsLocked() : {0}", _leaseid);
+ if (string.IsNullOrEmpty(_leaseid))
+ {
+ var leaseClient = blob.GetBlobLeaseClient(_leaseid);
+ var tempLease = leaseClient.Acquire(TimeSpan.FromSeconds(60)).Value;
+ if (string.IsNullOrEmpty(tempLease?.LeaseId))
+ {
+ Debug.Print("IsLocked() : TRUE");
+ return true;
+ }
+ leaseClient.Release();
+ }
+ Debug.Print("IsLocked() : {0}", _leaseid);
+ return !string.IsNullOrEmpty(_leaseid);
+ }
+ catch (RequestFailedException webErr)
+ {
+ if (_handleWebException(blob, webErr))
+ return IsLocked();
+ }
+ _leaseid = null;
+ return false;
+ }
- // keep the lease alive by renewing every 30 seconds
- long interval = (long)TimeSpan.FromSeconds(30).TotalMilliseconds;
- _renewTimer = new Timer((obj) =>
- {
- try
+ public override bool Obtain()
+ {
+ var blob = _azureDirectory.BlobContainer.GetBlockBlobClient(_lockFile);
+ try
+ {
+ Debug.Print("AzureLock:Obtain({0}) : {1}", _lockFile, _leaseid);
+ if (string.IsNullOrEmpty(_leaseid))
{
- AzureLock al = (AzureLock)obj;
- al.Renew();
+ _leaseid = blob.GetBlobLeaseClient(_leaseid).Acquire(TimeSpan.FromSeconds(60)).Value.LeaseId;
+ Debug.Print("AzureLock:Obtain({0}): AcquireLease : {1}", _lockFile, _leaseid);
+
+ // keep the lease alive by renewing every 30 seconds
+ long interval = (long)TimeSpan.FromSeconds(30).TotalMilliseconds;
+ _renewTimer = new Timer((obj) =>
+ {
+ try
+ {
+ AzureLock al = (AzureLock)obj;
+ al.Renew();
+ }
+ catch (Exception err) { Debug.Print(err.ToString()); }
+ }, this, interval, interval);
}
- catch (Exception err) { Debug.Print(err.ToString()); }
- }, this, interval, interval);
+ return !string.IsNullOrEmpty(_leaseid);
+ }
+ catch (RequestFailedException webErr)
+ {
+ if (_handleWebException(blob, webErr))
+ return Obtain();
+ }
+ return false;
}
- return !String.IsNullOrEmpty(_leaseid);
- }
- catch (StorageException webErr)
- {
- if (_handleWebException(blob, webErr))
- return Obtain();
- }
- return false;
- }
- private Timer _renewTimer;
+ private Timer _renewTimer;
- public void Renew()
- {
- if (!String.IsNullOrEmpty(_leaseid))
- {
- Debug.Print("AzureLock:Renew({0} : {1}", _lockFile, _leaseid);
- var blob = _azureDirectory.BlobContainer.GetBlockBlobReference(_lockFile);
- blob.RenewLease(new AccessCondition { LeaseId = _leaseid });
- }
- }
- protected override void Dispose(bool disposing)
- {
- Release();
- }
- public /*override*/ void Release()
- {
- Debug.Print("AzureLock:Release({0}) {1}", _lockFile, _leaseid);
- if (!String.IsNullOrEmpty(_leaseid))
- {
- var blob = _azureDirectory.BlobContainer.GetBlockBlobReference(_lockFile);
- blob.ReleaseLease(new AccessCondition { LeaseId = _leaseid });
- if (_renewTimer != null)
+ public void Renew()
{
- _renewTimer.Dispose();
- _renewTimer = null;
+ if (!string.IsNullOrEmpty(_leaseid))
+ {
+ Debug.Print("AzureLock:Renew({0} : {1}", _lockFile, _leaseid);
+ var blob = _azureDirectory.BlobContainer.GetBlockBlobClient(_lockFile);
+ blob.GetBlobLeaseClient(_leaseid).Renew();
+ }
}
- _leaseid = null;
- }
- }
- #endregion
+ protected override void Dispose(bool disposing)
+ {
+ Release();
+ }
+ public /*override*/ void Release()
+ {
+ Debug.Print("AzureLock:Release({0}) {1}", _lockFile, _leaseid);
+ if (!string.IsNullOrEmpty(_leaseid))
+ {
+ var blob = _azureDirectory.BlobContainer.GetBlockBlobClient(_lockFile);
+ blob.GetBlobLeaseClient(_leaseid).Release();
+ if (_renewTimer != null)
+ {
+ _renewTimer.Dispose();
+ _renewTimer = null;
+ }
+ _leaseid = null;
+ }
+ }
+ #endregion
- public void BreakLock()
- {
- Debug.Print("AzureLock:BreakLock({0}) {1}", _lockFile, _leaseid);
- var blob = _azureDirectory.BlobContainer.GetBlockBlobReference(_lockFile);
- try
- {
- blob.BreakLease();
- }
- catch (Exception)
- {
- }
- _leaseid = null;
- }
+ public void BreakLock()
+ {
+ Debug.Print("AzureLock:BreakLock({0}) {1}", _lockFile, _leaseid);
+ var blob = _azureDirectory.BlobContainer.GetBlockBlobClient(_lockFile);
+ try
+ {
+ blob.GetBlobLeaseClient(_leaseid).Break();
+ }
+ catch (Exception)
+ {
+ }
+ _leaseid = null;
+ }
- public override System.String ToString()
- {
- return String.Format("AzureLock@{0}.{1}", _lockFile, _leaseid);
- }
+ public override string ToString() => string.Format("AzureLock@{0}.{1}", _lockFile, _leaseid);
- private bool _handleWebException(ICloudBlob blob, StorageException err)
- {
- if (err.RequestInformation.HttpStatusCode == 404 || err.RequestInformation.HttpStatusCode == 409)
- {
- _azureDirectory.CreateContainer();
- using (var stream = new MemoryStream())
- using (var writer = new StreamWriter(stream))
+ private bool _handleWebException(BlockBlobClient blob, RequestFailedException err)
{
- writer.Write(_lockFile);
- try
- {
- blob.UploadFromStream(stream);
- }
- catch (Exception ex)
- {
- return false;
- }
+ if (err.Status == 404 || err.Status == 409)
+ {
+ _azureDirectory.CreateContainer();
+ using (var stream = new MemoryStream())
+ using (var writer = new StreamWriter(stream))
+ {
+ writer.Write(_lockFile);
+ try
+ {
+ blob.Upload(stream);
+ }
+ catch (Exception /*ex*/)
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
}
- return true;
- }
- return false;
}
-
- }
-
}
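All lease calls above now route through `BlobLeaseClient`; the `GetBlobLeaseClient` extension lives in `Azure.Storage.Blobs.Specialized`. A sketch of the acquire/renew/release cycle behind `Obtain`, the 30-second renewal timer, and `Release` (the 60-second duration mirrors the code; names are illustrative):

```csharp
using System;
using Azure.Storage.Blobs.Specialized;

static class LeaseSketch
{
    public static void AcquireRenewRelease(BlockBlobClient lockBlob)
    {
        // Acquire: replaces v11's blob.AcquireLease(TimeSpan, leaseId)
        var leaseId = lockBlob.GetBlobLeaseClient().Acquire(TimeSpan.FromSeconds(60)).Value.LeaseId;

        // Renew: what the 30-second timer does to keep the lock alive
        lockBlob.GetBlobLeaseClient(leaseId).Renew();

        // Release: replaces v11's ReleaseLease(new AccessCondition { LeaseId = ... })
        lockBlob.GetBlobLeaseClient(leaseId).Release();
    }
}
```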
diff --git a/AzureDirectory/BlobMutexManager.cs b/AzureDirectory/BlobMutexManager.cs
index b00161c..a3f591b 100644
--- a/AzureDirectory/BlobMutexManager.cs
+++ b/AzureDirectory/BlobMutexManager.cs
@@ -2,28 +2,28 @@
namespace Lucene.Net.Store.Azure
{
- public static class BlobMutexManager
- {
-
- public static Mutex GrabMutex(string name)
+ public static class BlobMutexManager
{
- var mutexName = "luceneSegmentMutex_" + name;
- var notExisting = false;
+ public static Mutex GrabMutex(string name)
+ {
+ var mutexName = "luceneSegmentMutex_" + name;
+
+ var notExisting = false;
- if (Mutex.TryOpenExisting(mutexName, out var mutex))
- {
- return mutex;
- }
+ if (Mutex.TryOpenExisting(mutexName, out var mutex))
+ {
+ return mutex;
+ }
- // Here we know the mutex either doesn't exist or we don't have the necessary permissions.
+ // Here we know the mutex either doesn't exist or we don't have the necessary permissions.
- if (!Mutex.TryOpenExisting(mutexName, out mutex))
- {
- notExisting = true;
- }
+ if (!Mutex.TryOpenExisting(mutexName, out mutex))
+ {
+ notExisting = true;
+ }
- return notExisting ? new Mutex(false, mutexName, out _) : Mutex.OpenExisting(mutexName);
+ return notExisting ? new Mutex(false, mutexName, out _) : Mutex.OpenExisting(mutexName);
+ }
}
- }
}
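Usage sketch for `BlobMutexManager`: `AzureIndexInput` and `AzureIndexOutput` grab one named mutex per file to serialize access to the local cache copy (the file name here is illustrative):

```csharp
using Lucene.Net.Store.Azure;

class MutexSketch
{
    static void Main()
    {
        var mutex = BlobMutexManager.GrabMutex("_0.cfs"); // one mutex per cached file name
        mutex.WaitOne();
        try
        {
            // read or write the local cache copy of the file here
        }
        finally
        {
            mutex.ReleaseMutex();
        }
    }
}
```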
diff --git a/AzureDirectory/StreamInput.cs b/AzureDirectory/StreamInput.cs
index 6167a93..d2f90c4 100644
--- a/AzureDirectory/StreamInput.cs
+++ b/AzureDirectory/StreamInput.cs
@@ -3,71 +3,71 @@
namespace Lucene.Net.Store.Azure
{
- /// <summary>
- /// Stream wrapper around IndexInput
- /// </summary>
- public class StreamInput : Stream
- {
- protected IndexInput Input { get; set; }
-
- public StreamInput(IndexInput input)
+    /// <summary>
+    /// Stream wrapper around IndexInput
+    /// </summary>
+ public class StreamInput : Stream
{
- this.Input = input;
- }
+ protected IndexInput Input { get; set; }
- public override bool CanRead => true;
+ public StreamInput(IndexInput input)
+ {
+ this.Input = input;
+ }
- public override bool CanSeek => true;
+ public override bool CanRead => true;
- public override bool CanWrite => false;
- public override void Flush() { }
- public override long Length => Input.Length;
+ public override bool CanSeek => true;
- public override long Position
- {
- get => Input.GetFilePointer();
- set => Input.Seek(value);
- }
+ public override bool CanWrite => false;
+ public override void Flush() { }
+ public override long Length => Input.Length;
- public override int Read(byte[] buffer, int offset, int count)
- {
+ public override long Position
+ {
+ get => Input.GetFilePointer();
+ set => Input.Seek(value);
+ }
- var pos = Input.GetFilePointer();
- try
- {
- var len = Input.Length;
- if (count > (len - pos))
- count = (int)(len - pos);
- Input.ReadBytes(buffer, offset, count);
- }
- catch (Exception) { }
- return (int)(Input.GetFilePointer() - pos);
- }
+ public override int Read(byte[] buffer, int offset, int count)
+ {
- public override long Seek(long offset, SeekOrigin origin)
- {
- switch (origin)
- {
- case SeekOrigin.Begin:
- Input.Seek(offset);
- break;
- case SeekOrigin.Current:
- Input.Seek(Input.GetFilePointer() + offset);
- break;
- case SeekOrigin.End:
- throw new System.NotImplementedException();
- }
- return Input.GetFilePointer();
- }
+ var pos = Input.GetFilePointer();
+ try
+ {
+ var len = Input.Length;
+ if (count > (len - pos))
+ count = (int)(len - pos);
+ Input.ReadBytes(buffer, offset, count);
+ }
+ catch (Exception) { }
+ return (int)(Input.GetFilePointer() - pos);
+ }
- public override void SetLength(long value) => throw new NotImplementedException();
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ switch (origin)
+ {
+ case SeekOrigin.Begin:
+ Input.Seek(offset);
+ break;
+ case SeekOrigin.Current:
+ Input.Seek(Input.GetFilePointer() + offset);
+ break;
+ case SeekOrigin.End:
+ throw new System.NotImplementedException();
+ }
+ return Input.GetFilePointer();
+ }
- public override void Write(byte[] buffer, int offset, int count) => throw new NotImplementedException();
+ public override void SetLength(long value) => throw new NotImplementedException();
- public override void Close()
- {
- Input.Dispose();
- base.Close();
+ public override void Write(byte[] buffer, int offset, int count) => throw new NotImplementedException();
+
+ public override void Close()
+ {
+ Input.Dispose();
+ base.Close();
+ }
}
- }
}
diff --git a/AzureDirectory/StreamOutput.cs b/AzureDirectory/StreamOutput.cs
index 5e473c6..15a36f7 100644
--- a/AzureDirectory/StreamOutput.cs
+++ b/AzureDirectory/StreamOutput.cs
@@ -3,64 +3,64 @@
namespace Lucene.Net.Store.Azure
{
- /// <summary>
- /// Stream wrapper around an IndexOutput
- /// </summary>
- public class StreamOutput : Stream
- {
- protected IndexOutput Output { get; set; }
-
- public StreamOutput(IndexOutput output)
+    /// <summary>
+    /// Stream wrapper around an IndexOutput
+    /// </summary>
+ public class StreamOutput : Stream
{
- this.Output = output;
- }
+ protected IndexOutput Output { get; set; }
- public override bool CanRead => false;
+ public StreamOutput(IndexOutput output)
+ {
+ this.Output = output;
+ }
- public override bool CanSeek => true;
+ public override bool CanRead => false;
- public override bool CanWrite => true;
+ public override bool CanSeek => true;
- public override void Flush() => this.Output.Flush();
+ public override bool CanWrite => true;
- public override long Length => this.Output.Length;
+ public override void Flush() => this.Output.Flush();
- public override long Position
- {
- get => this.Output.GetFilePointer();
- set => this.Output.Seek(value);
- }
+ public override long Length => this.Output.Length;
- public override int Read(byte[] buffer, int offset, int count)
- {
- throw new NotImplementedException();
- }
+ public override long Position
+ {
+ get => this.Output.GetFilePointer();
+ set => this.Output.Seek(value);
+ }
- public override long Seek(long offset, SeekOrigin origin)
- {
- switch (origin)
- {
- case SeekOrigin.Begin:
- this.Output.Seek(offset);
- break;
- case SeekOrigin.Current:
- this.Output.Seek(Output.GetFilePointer() + offset);
- break;
- case SeekOrigin.End:
- throw new NotImplementedException();
- }
- return Output.GetFilePointer();
- }
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ throw new NotImplementedException();
+ }
- public override void SetLength(long value) => throw new NotImplementedException();
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ switch (origin)
+ {
+ case SeekOrigin.Begin:
+ this.Output.Seek(offset);
+ break;
+ case SeekOrigin.Current:
+ this.Output.Seek(Output.GetFilePointer() + offset);
+ break;
+ case SeekOrigin.End:
+ throw new NotImplementedException();
+ }
+ return Output.GetFilePointer();
+ }
- public override void Write(byte[] buffer, int offset, int count) => this.Output.WriteBytes(buffer, offset, count);
+ public override void SetLength(long value) => throw new NotImplementedException();
- public override void Close()
- {
- Output.Flush();
- Output.Dispose();
- base.Close();
+ public override void Write(byte[] buffer, int offset, int count) => this.Output.WriteBytes(buffer, offset, count);
+
+ public override void Close()
+ {
+ Output.Flush();
+ Output.Dispose();
+ base.Close();
+ }
}
- }
}
diff --git a/readme.md b/readme.md
index 5796509..39fb03f 100644
--- a/readme.md
+++ b/readme.md
@@ -5,6 +5,8 @@
A fork of this project: https://azuredirectory.codeplex.com/
Updated to work with Lucene 3.0.3, and version 2.1.0.3 of the Azure Storage client.
+This branch has been updated to use Azure.Storage.Blobs (V12) instead of the older Microsoft.Azure.Storage.Blob (V11) libraries, and Lucene v4.8.0-beta00012.
+
## Project description
Lucene.Net is a robust open source search technology which has an abstract interface called a Directory for defining how the index is stored. AzureDirectory is an implementation of that interface for Windows Azure Blob Storage.
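For reference, a minimal sketch of opening an index against the updated API (connection string, cache path, and container name are illustrative):

```csharp
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store.Azure;
using Lucene.Net.Util;

class Program
{
    static void Main()
    {
        // the new constructor takes a connection string instead of a CloudStorageAccount
        var azureDirectory = new AzureDirectory(
            "UseDevelopmentStorage=true",   // storage connection string
            @"C:\temp\cache",               // local cache directory
            containerName: "lucene");

        var config = new IndexWriterConfig(
            LuceneVersion.LUCENE_48,
            new StandardAnalyzer(LuceneVersion.LUCENE_48));

        using var writer = new IndexWriter(azureDirectory, config);
        // add documents and commit here
    }
}
```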