diff --git a/west.yml b/west.yml
index ec3d37c0fe42..5046cf4c4e51 100644
--- a/west.yml
+++ b/west.yml
@@ -45,7 +45,7 @@ manifest:
     - name: zephyr
       repo-path: zephyr
-      revision: 77e193eb03f4237e5c37d4905d703432e659085c
+      revision: 8c4eec7ac6e37be89af89e021c6f5c96e1ac1e0a
       remote: zephyrproject
 
       # Import some projects listed in zephyr/west.yml@revision
diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c
index cbf94e7f8fc8..8fa3f7b0d7b4 100644
--- a/zephyr/lib/regions_mm.c
+++ b/zephyr/lib/regions_mm.c
@@ -132,8 +132,8 @@ struct vmh_heap *vmh_init_heap(const struct vmh_heap_config *cfg,
 		new_heap->physical_blocks_allocators[i] = new_allocator;
 
 		/* Fill allocators data based on config and virtual region data */
-		new_allocator->num_blocks = cfg->block_bundles_table[i].number_of_blocks;
-		new_allocator->blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
+		new_allocator->info.num_blocks = cfg->block_bundles_table[i].number_of_blocks;
+		new_allocator->info.blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
 		new_allocator->buffer = (uint8_t *)new_heap->virtual_region->addr + offset;
 
 		/* Create bit array that is a part of mem_block kept as a ptr */
@@ -270,11 +270,11 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 		 * mem_block.
 		 */
 		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift;
+			1 << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift;
 		block_count = SOF_DIV_ROUND_UP((uint64_t)alloc_size, (uint64_t)block_size);
 
 		if (block_count >
-		    heap->physical_blocks_allocators[mem_block_iterator]->num_blocks)
+		    heap->physical_blocks_allocators[mem_block_iterator]->info.num_blocks)
 			continue;
 		/* Try span alloc on first available mem_block for non span
 		 * check if block size is sufficient.
@@ -455,7 +455,7 @@ int vmh_free_heap(struct vmh_heap *heap)
 		if (!heap->physical_blocks_allocators[i])
 			continue;
 		if (!sys_bitarray_is_region_cleared(heap->physical_blocks_allocators[i]->bitmap,
-						    heap->physical_blocks_allocators[i]->num_blocks, 0))
+						    heap->physical_blocks_allocators[i]->info.num_blocks, 0))
 			return -ENOTEMPTY;
 	}
 
@@ -502,14 +502,14 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	for (mem_block_iterator = 0, ptr_range_found = false;
 	     mem_block_iterator < MAX_MEMORY_ALLOCATORS_COUNT;
 	     mem_block_iterator++) {
-		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift;
+		block_size = 1
+			<< heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift;
 
 		if (vmh_is_ptr_in_memory_range((uintptr_t)ptr,
 					       (uintptr_t)heap->physical_blocks_allocators
 					       [mem_block_iterator]->buffer,
 					       heap->physical_blocks_allocators
-					       [mem_block_iterator]->num_blocks * block_size)) {
+					       [mem_block_iterator]->info.num_blocks * block_size)) {
 			ptr_range_found = true;
 			break;
 		}
@@ -556,7 +556,8 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	 */
 	size_t bits_to_check =
 		heap->physical_blocks_allocators
-		[mem_block_iterator]->num_blocks - ptr_bit_array_position;
+		[mem_block_iterator]->info.num_blocks
+		- ptr_bit_array_position;
 
 	/* Neeeeeeeds optimization - thinking how to do it properly
 	 * each set bit in order after another means one allocated block.