diff --git a/west.yml b/west.yml
index 8543b993e2b1..5046cf4c4e51 100644
--- a/west.yml
+++ b/west.yml
@@ -45,7 +45,7 @@ manifest:
     - name: zephyr
       repo-path: zephyr
-      revision: 492517b918d267f553688cd6b9d59b92ffc10f91
+      revision: 8c4eec7ac6e37be89af89e021c6f5c96e1ac1e0a
       remote: zephyrproject
 
       # Import some projects listed in zephyr/west.yml@revision
diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c
index cbf94e7f8fc8..dbb1aa087b26 100644
--- a/zephyr/lib/regions_mm.c
+++ b/zephyr/lib/regions_mm.c
@@ -132,8 +132,8 @@ struct vmh_heap *vmh_init_heap(const struct vmh_heap_config *cfg,
 		new_heap->physical_blocks_allocators[i] = new_allocator;
 
 		/* Fill allocators data based on config and virtual region data */
-		new_allocator->num_blocks = cfg->block_bundles_table[i].number_of_blocks;
-		new_allocator->blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
+		new_allocator->info.num_blocks = cfg->block_bundles_table[i].number_of_blocks;
+		new_allocator->info.blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
 		new_allocator->buffer = (uint8_t *)new_heap->virtual_region->addr + offset;
 
 		/* Create bit array that is a part of mem_block kept as a ptr */
@@ -270,11 +270,11 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 		 * mem_block.
 		 */
 		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift;
+			1 << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift;
 		block_count = SOF_DIV_ROUND_UP((uint64_t)alloc_size, (uint64_t)block_size);
 
 		if (block_count >
-			heap->physical_blocks_allocators[mem_block_iterator]->num_blocks)
+			heap->physical_blocks_allocators[mem_block_iterator]->info.num_blocks)
 			continue;
 		/* Try span alloc on first available mem_block for non span
 		 * check if block size is sufficient.
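The regions_mm.c hunks track an upstream Zephyr rework of struct sys_mem_blocks (hence the west.yml revision bump): the allocator geometry fields this code reads were moved into an embedded info member. Below is an abbreviated sketch of the layout the file now compiles against; only the fields regions_mm.c touches are shown, and the exact field set/order is an assumption, see zephyr/include/zephyr/sys/mem_blocks.h at the pinned revision.

/* Abbreviated sketch, not the full upstream definition. */
struct sys_mem_blocks_info {
	uint32_t num_blocks;   /* total blocks managed by this allocator */
	uint8_t blk_sz_shift;  /* log2 of the block size in bytes */
};

struct sys_mem_blocks {
	struct sys_mem_blocks_info info; /* was: num_blocks/blk_sz_shift inline */
	uint8_t *buffer;                 /* base address of the managed region */
	sys_bitarray_t *bitmap;          /* one allocation bit per block */
	/* ... further fields omitted ... */
};

With that layout, every former allocator->num_blocks or allocator->blk_sz_shift read becomes allocator->info.num_blocks or allocator->info.blk_sz_shift, which is the mechanical change applied throughout this file.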
@@ -455,7 +455,7 @@ int vmh_free_heap(struct vmh_heap *heap)
 		if (!heap->physical_blocks_allocators[i])
 			continue;
 		if (!sys_bitarray_is_region_cleared(heap->physical_blocks_allocators[i]->bitmap,
-				heap->physical_blocks_allocators[i]->num_blocks, 0))
+				heap->physical_blocks_allocators[i]->info.num_blocks, 0))
 			return -ENOTEMPTY;
 	}
@@ -492,24 +492,24 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	if (heap->core_id != cpu_get_id())
 		return -EINVAL;
 
-	size_t mem_block_iterator, i, size_to_free, block_size, ptr_bit_array_offset,
+	size_t mem_block_iter, i, size_to_free, block_size, ptr_bit_array_offset,
 		ptr_bit_array_position, physical_block_count, check_offset, check_position,
 		check_size;
 	uintptr_t phys_aligned_ptr, phys_aligned_alloc_end, phys_block_ptr;
 	bool ptr_range_found;
 
 	/* Get allocator from which ptr was allocated */
-	for (mem_block_iterator = 0, ptr_range_found = false;
-	     mem_block_iterator < MAX_MEMORY_ALLOCATORS_COUNT;
-	     mem_block_iterator++) {
+	for (mem_block_iter = 0, ptr_range_found = false;
+	     mem_block_iter < MAX_MEMORY_ALLOCATORS_COUNT;
+	     mem_block_iter++) {
 		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift;
+			1 << heap->physical_blocks_allocators[mem_block_iter]->info.blk_sz_shift;
 
 		if (vmh_is_ptr_in_memory_range((uintptr_t)ptr,
 				(uintptr_t)heap->physical_blocks_allocators
-					[mem_block_iterator]->buffer,
+					[mem_block_iter]->buffer,
 				heap->physical_blocks_allocators
-					[mem_block_iterator]->num_blocks * block_size)) {
+					[mem_block_iter]->info.num_blocks * block_size)) {
 			ptr_range_found = true;
 			break;
 		}
@@ -528,19 +528,19 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 
 	/* Not sure if that is fastest way to find the size comments welcome */
 	ptr_bit_array_offset = (uintptr_t)ptr
-		- (uintptr_t)heap->physical_blocks_allocators[mem_block_iterator]->buffer;
+		- (uintptr_t)heap->physical_blocks_allocators[mem_block_iter]->buffer;
 	ptr_bit_array_position = ptr_bit_array_offset / block_size;
 
 	/* Allocation bit array check */
 	int bit_value, prev_bit_value = 0;
 
-	sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iterator],
+	sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iter],
 		ptr_bit_array_position, &bit_value);
 	/* If checked bit is in position 0 we assume it is valid
 	 * and assigned 0 for further logic
 	 */
 	if (ptr_bit_array_position)
-		sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iterator],
+		sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iter],
 			ptr_bit_array_position - 1, &prev_bit_value);
 
 	/* If bit is 1 we know we could be at the start of the allocation,
@@ -556,7 +556,7 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 		 */
 		size_t bits_to_check =
 			heap->physical_blocks_allocators
-				[mem_block_iterator]->num_blocks - ptr_bit_array_position;
+				[mem_block_iter]->info.num_blocks - ptr_bit_array_position;
 
 		/* Neeeeeeeds optimization - thinking how to do it properly
 		 * each set bit in order after another means one allocated block.
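For orientation, the loop renamed above probes every allocator for the one whose buffer contains ptr. A minimal standalone restatement of that ownership test follows; the helper name and exact form are illustrative, not part of the diff:

#include <stdbool.h>
#include <stdint.h>
#include <zephyr/sys/mem_blocks.h>

/* Illustrative only: ptr belongs to this allocator when it lies inside
 * [buffer, buffer + num_blocks * block_size), where the block size is
 * 1 << info.blk_sz_shift.
 */
static bool vmh_allocator_owns_ptr(const struct sys_mem_blocks *allocator,
				   uintptr_t ptr)
{
	uintptr_t base = (uintptr_t)allocator->buffer;
	size_t span = (size_t)allocator->info.num_blocks
		<< allocator->info.blk_sz_shift;

	return ptr >= base && ptr < base + span;
}

The bit position computed afterwards, (ptr - buffer) / block_size, then indexes both the allocation bitmap and the allocation_sizes bitarray from which the allocation length is recovered.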
@@ -567,7 +567,7 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 		     i < bits_to_check;
 		     i++) {
-			sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iterator], i,
+			sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iter], i,
 				&bit_value);
 			if (bit_value)
 				size_to_free += block_size;
@@ -582,10 +582,10 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 		}
 
 		retval = sys_mem_blocks_free_contiguous(
-			heap->physical_blocks_allocators[mem_block_iterator], ptr,
+			heap->physical_blocks_allocators[mem_block_iter], ptr,
 			size_to_free / block_size);
 	} else {
-		retval = sys_mem_blocks_free(heap->physical_blocks_allocators[mem_block_iterator],
+		retval = sys_mem_blocks_free(heap->physical_blocks_allocators[mem_block_iter],
 			1, &ptr);
 	}
@@ -610,13 +610,13 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 		phys_block_ptr = phys_aligned_ptr + i * CONFIG_MM_DRV_PAGE_SIZE;
 
 		check_offset = phys_block_ptr
-			- (uintptr_t)heap->physical_blocks_allocators[mem_block_iterator]->buffer;
+			- (uintptr_t)heap->physical_blocks_allocators[mem_block_iter]->buffer;
 		check_position = check_offset / block_size;
 		check_size = CONFIG_MM_DRV_PAGE_SIZE / block_size;
 
 		if (sys_bitarray_is_region_cleared(
-				heap->physical_blocks_allocators[mem_block_iterator]->bitmap,
+				heap->physical_blocks_allocators[mem_block_iter]->bitmap,
 				check_size, check_offset))
 			sys_mm_drv_unmap_region((void *)phys_block_ptr,
 				CONFIG_MM_DRV_PAGE_SIZE);
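The final hunk only renames the iterator, but the surrounding page-release logic is easy to misread, so here is a condensed sketch of what it amounts to. The helper name is hypothetical; the sketch assumes CONFIG_MM_DRV_PAGE_SIZE is a multiple of block_size and, since sys_bitarray_is_region_cleared() takes a bit count and a starting bit offset, it passes the block index (the diff's check_position) as the offset:

#include <stdint.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/drivers/mm/system_mm.h>

/* Illustrative sketch: walk each physical page backing the freed range and
 * unmap it, but only when every allocator block that falls on that page is
 * free in the allocation bitmap.
 */
static void vmh_unmap_free_pages(struct sys_mem_blocks *allocator,
				 uintptr_t phys_aligned_ptr,
				 size_t physical_block_count,
				 size_t block_size)
{
	size_t blocks_per_page = CONFIG_MM_DRV_PAGE_SIZE / block_size;
	size_t i;

	for (i = 0; i < physical_block_count; i++) {
		uintptr_t page = phys_aligned_ptr + i * CONFIG_MM_DRV_PAGE_SIZE;
		size_t first_block =
			(page - (uintptr_t)allocator->buffer) / block_size;

		if (sys_bitarray_is_region_cleared(allocator->bitmap,
						   blocks_per_page, first_block))
			sys_mm_drv_unmap_region((void *)page,
						CONFIG_MM_DRV_PAGE_SIZE);
	}
}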