diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b85f876c..6337b242c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## Unreleased +### Features +- Allow setting the cache memory portion and maximum for tilesources (#601) + ### Improvements - Cache histogram requests (#598) diff --git a/large_image/cache_util/cache.py b/large_image/cache_util/cache.py index b2b9c200e..4ee223da5 100644 --- a/large_image/cache_util/cache.py +++ b/large_image/cache_util/cache.py @@ -137,7 +137,8 @@ def __new__(metacls, name, bases, namespace, **kwargs): # noqa - N804 CacheProperties[cacheName].get('itemExpectedSize')): maxSize = pickAvailableCache( CacheProperties[cacheName]['itemExpectedSize'], - maxItems=CacheProperties[cacheName]['maxItems']) + maxItems=CacheProperties[cacheName]['maxItems'], + cacheName=cacheName) maxSize = namespace.pop('cacheMaxSize', maxSize) maxSize = kwargs.get('cacheMaxSize', maxSize) if maxSize is None: @@ -154,10 +155,10 @@ def __new__(metacls, name, bases, namespace, **kwargs): # noqa - N804 cacheName = cls if LruCacheMetaclass.namedCaches.get(cacheName) is None: - cache, cacheLock = CacheFactory().getCache(maxSize) + cache, cacheLock = CacheFactory().getCache(maxSize, cacheName=cacheName) LruCacheMetaclass.namedCaches[cacheName] = (cache, cacheLock) config.getConfig('logger').info( - 'Created LRU Cache for %r with %d maximum size' % (cacheName, maxSize)) + 'Created LRU Cache for %r with %d maximum size' % (cacheName, cache.maxsize)) else: (cache, cacheLock) = LruCacheMetaclass.namedCaches[cacheName] @@ -204,7 +205,7 @@ def getTileCache(): if _tileCache is None: # Decide whether to use Memcached or cachetools - _tileCache, _tileLock = CacheFactory().getCache() + _tileCache, _tileLock = CacheFactory().getCache(cacheName='tileCache') return _tileCache, _tileLock diff --git a/large_image/cache_util/cachefactory.py b/large_image/cache_util/cachefactory.py index d513510d3..fab34a148 100644 --- a/large_image/cache_util/cachefactory.py +++ 
b/large_image/cache_util/cachefactory.py @@ -30,7 +30,7 @@ MemCache = None -def pickAvailableCache(sizeEach, portion=8, maxItems=None): +def pickAvailableCache(sizeEach, portion=8, maxItems=None, cacheName=None): """ Given an estimated size of an item, return how many of those items would fit in a fixed portion of the available virtual memory. @@ -39,9 +39,17 @@ def pickAvailableCache(sizeEach, portion=8, maxItems=None): :param portion: the inverse fraction of the memory which can be used. :param maxItems: if specified, the number of items is never more than this value. + :param cacheName: if specified, the portion can be affected by the + configuration. :return: the number of items that should be cached. Always at least two, unless maxItems is less. """ + if cacheName: + portion = max(portion, int(config.getConfig( + f'cache_{cacheName}_memory_portion', portion))) + configMaxItems = int(config.getConfig(f'cache_{cacheName}_maximum', 0)) + if configMaxItems > 0: + maxItems = configMaxItems # Estimate usage based on (1 / portion) of the total virtual memory. 
if psutil: memory = psutil.virtual_memory().total @@ -56,18 +64,28 @@ def pickAvailableCache(sizeEach, portion=8, maxItems=None): class CacheFactory: logged = False - def getCacheSize(self, numItems): + def getCacheSize(self, numItems, cacheName=None): if numItems is None: defaultPortion = 32 try: - portion = int(config.getConfig('cache_python_memory_portion', defaultPortion)) - portion = max(portion, 3) + portion = int(config.getConfig('cache_python_memory_portion', 0)) + if cacheName: + portion = max(portion, int(config.getConfig( + f'cache_{cacheName}_memory_portion', portion))) + portion = max(portion or defaultPortion, 3) except ValueError: portion = defaultPortion numItems = pickAvailableCache(256**2 * 4 * 2, portion) + if cacheName: + try: + maxItems = int(config.getConfig(f'cache_{cacheName}_maximum', 0)) + if maxItems > 0: + numItems = min(numItems, max(maxItems, 3)) + except ValueError: + pass return numItems - def getCache(self, numItems=None): + def getCache(self, numItems=None, cacheName=None): # memcached is the fallback default, if available. 
cacheBackend = config.getConfig('cache_backend', 'python') if cacheBackend: @@ -96,7 +114,7 @@ def getCache(self, numItems=None): cache = None if cache is None: # fallback backend cacheBackend = 'python' - cache = LRUCache(self.getCacheSize(numItems)) + cache = LRUCache(self.getCacheSize(numItems, cacheName=cacheName)) cacheLock = threading.Lock() if numItems is None and not CacheFactory.logged: config.getConfig('logprint').info('Using %s for large_image caching' % cacheBackend) diff --git a/large_image/config.py b/large_image/config.py index 9b5763f86..c305892b1 100644 --- a/large_image/config.py +++ b/large_image/config.py @@ -11,6 +11,7 @@ 'logger': fallbackLogger, 'logprint': fallbackLogger, + # For tiles 'cache_backend': 'python', # 'python' or 'memcached' # 'python' cache can use 1/(val) of the available memory 'cache_python_memory_portion': 32, @@ -19,6 +20,15 @@ 'cache_memcached_username': None, 'cache_memcached_password': None, + # Generally, these keys are the form of "cache_<cacheName>_<key>" + + # For tilesources. These are also limited by available file handles. + # 'python' cache can use 1/(val) of the available memory based on a very + # rough estimate of the amount of memory used by a tilesource + 'cache_tilesource_memory_portion': 8, + # If >0, this is the maximum number of tilesources that will be cached + 'cache_tilesource_maximum': 0, + 'max_small_image_size': 4096, }