From b07fe487bded260365dbfe34ae5da679b08c1218 Mon Sep 17 00:00:00 2001 From: Tomasz Leman Date: Mon, 2 Oct 2023 12:08:46 +0200 Subject: [PATCH] west.yml: update zephyr to 3.5.0-rc1 Zephyr update: total of 74 commits. Changes include: 8c4eec7ac6 intel_adsp: boot_complete must be done PRE_KERNEL_1 1fc16e6565 release: Zephyr 3.5.0-rc1 c910dc81a6 sys_clock: header: minor cleanup and doxygenization b9f8b91692 kernel: sys_clock: remove stray z_enable_sys_clock prototype cc2a558707 kernel: move more internal smp calls into internal domain a1c7bfbc63 kernel: remove unused z_init_thread_base from kernel.h 209ff606be kernel: move internal smp calls to a internal header e19f21cb27 kernel: move z_is_thread_essential out of public kernel header f0c7fbf0f1 kernel: move sched_priq.h to internal/ folder e6f1090553 kernel: Integrate object core statistics 1d5d674e0d kernel: Add initial k_obj_core_stats infrastructure 6df8efe354 kernel: Integrate object cores into kernel 55db86e512 kernel: Add initial obj_core infrastructure eb1e5a161d kernel: FIFO and LIFO have their own sections 9bedfd82a2 kernel: Refactor CPU usage baea37aeb4 kernel: Re-factor sys_mem_blocks definition 2f003e59e4 kernel: Re-factor k_mem_slab definition Signed-off-by: Tomasz Leman --- west.yml | 2 +- zephyr/lib/regions_mm.c | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/west.yml b/west.yml index ec3d37c0fe42..5046cf4c4e51 100644 --- a/west.yml +++ b/west.yml @@ -45,7 +45,7 @@ manifest: - name: zephyr repo-path: zephyr - revision: 77e193eb03f4237e5c37d4905d703432e659085c + revision: 8c4eec7ac6e37be89af89e021c6f5c96e1ac1e0a remote: zephyrproject # Import some projects listed in zephyr/west.yml@revision diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c index cbf94e7f8fc8..8fa3f7b0d7b4 100644 --- a/zephyr/lib/regions_mm.c +++ b/zephyr/lib/regions_mm.c @@ -132,8 +132,8 @@ struct vmh_heap *vmh_init_heap(const struct vmh_heap_config *cfg, 
new_heap->physical_blocks_allocators[i] = new_allocator; /* Fill allocators data based on config and virtual region data */ - new_allocator->num_blocks = cfg->block_bundles_table[i].number_of_blocks; - new_allocator->blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size); + new_allocator->info.num_blocks = cfg->block_bundles_table[i].number_of_blocks; + new_allocator->info.blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size); new_allocator->buffer = (uint8_t *)new_heap->virtual_region->addr + offset; /* Create bit array that is a part of mem_block kept as a ptr */ @@ -270,11 +270,11 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size) * mem_block. */ block_size = - 1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift; + 1 << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift; block_count = SOF_DIV_ROUND_UP((uint64_t)alloc_size, (uint64_t)block_size); if (block_count > - heap->physical_blocks_allocators[mem_block_iterator]->num_blocks) + heap->physical_blocks_allocators[mem_block_iterator]->info.num_blocks) continue; /* Try span alloc on first available mem_block for non span * check if block size is sufficient. 
@@ -455,7 +455,7 @@ int vmh_free_heap(struct vmh_heap *heap) if (!heap->physical_blocks_allocators[i]) continue; if (!sys_bitarray_is_region_cleared(heap->physical_blocks_allocators[i]->bitmap, - heap->physical_blocks_allocators[i]->num_blocks, 0)) + heap->physical_blocks_allocators[i]->info.num_blocks, 0)) return -ENOTEMPTY; } @@ -502,14 +502,14 @@ int vmh_free(struct vmh_heap *heap, void *ptr) for (mem_block_iterator = 0, ptr_range_found = false; mem_block_iterator < MAX_MEMORY_ALLOCATORS_COUNT; mem_block_iterator++) { - block_size = - 1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift; + block_size = 1 + << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift; if (vmh_is_ptr_in_memory_range((uintptr_t)ptr, (uintptr_t)heap->physical_blocks_allocators [mem_block_iterator]->buffer, heap->physical_blocks_allocators - [mem_block_iterator]->num_blocks * block_size)) { + [mem_block_iterator]->info.num_blocks * block_size)) { ptr_range_found = true; break; } @@ -556,7 +556,8 @@ int vmh_free(struct vmh_heap *heap, void *ptr) */ size_t bits_to_check = heap->physical_blocks_allocators - [mem_block_iterator]->num_blocks - ptr_bit_array_position; + [mem_block_iterator]->info.num_blocks + - ptr_bit_array_position; /* Neeeeeeeds optimization - thinking how to do it properly * each set bit in order after another means one allocated block.