Skip to content

Commit

Permalink
drivers: mm/intel_adsp: Add MMU support to MM driver
Browse files Browse the repository at this point in the history
On Intel platforms most of HPSRAM is used by modules only when they
are required. By default all these banks are powered off and are
enabled by the L2 MM driver only when needed.

At driver initialisation each unused page is unmapped, and
when a whole HPSRAM bank is unused its power is switched off.

Since mapping memory in MMU TLB and flushing it at system start is
not needed and time consuming, this patch modifies the init
sequence to avoid it.

Additionally it adds MMU mapping and unmapping for pages allocated
from the unused pool.

Signed-off-by: Flavio Ceolin <[email protected]>
Signed-off-by: Jaroslaw Stelter <[email protected]>
Signed-off-by: Anas Nashif <[email protected]>
  • Loading branch information
jxstelter authored and Flavio Ceolin committed May 30, 2024
1 parent 34a0651 commit 13550b6
Show file tree
Hide file tree
Showing 2 changed files with 57 additions and 8 deletions.
5 changes: 1 addition & 4 deletions drivers/mm/mm_drv_intel_adsp.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,7 @@

#include "mm_drv_common.h"

DEVICE_MMIO_TOPLEVEL_STATIC(tlb_regs, DT_DRV_INST(0));

#define TLB_BASE \
((mm_reg_t)DEVICE_MMIO_TOPLEVEL_GET(tlb_regs))
#define TLB_BASE (mm_reg_t)DT_REG_ADDR(DT_NODELABEL(tlb))

/*
* Number of significant bits in the page index (defines the size of
Expand Down
60 changes: 56 additions & 4 deletions drivers/mm/mm_drv_intel_adsp_mtl_tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include <zephyr/drivers/mm/mm_drv_bank.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/cache.h>
#include <kernel_arch_interface.h>

#define SRAM_BANK_PAGE_NUM (SRAM_BANK_SIZE / CONFIG_MM_DRV_PAGE_SIZE)

Expand Down Expand Up @@ -268,6 +269,9 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)

tlb_entries[entry_idx] = entry;

#ifdef CONFIG_MMU
arch_mem_map(virt, va, CONFIG_MM_DRV_PAGE_SIZE, flags);
#endif
/*
* Invalid the cache of the newly mapped virtual page to
* avoid stale data.
Expand Down Expand Up @@ -329,7 +333,7 @@ int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
return sys_mm_drv_simple_map_array(va, phys, cnt, flags);
}

int sys_mm_drv_unmap_page(void *virt)
static int sys_mm_drv_unmap_page_wflush(void *virt, bool flush_data)
{
k_spinlock_key_t key;
uint32_t entry_idx, bank_idx;
Expand Down Expand Up @@ -358,8 +362,14 @@ int sys_mm_drv_unmap_page(void *virt)
/*
* Flush the cache to make sure the backing physical page
* has the latest data.
* No flush when called from sys_mm_drv_mm_init().
*/
sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE);
if (flush_data) {
sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE);
#ifdef CONFIG_MMU
arch_mem_unmap(virt, CONFIG_MM_DRV_PAGE_SIZE);
#endif
}

entry_idx = get_tlb_entry_idx(va);
pa = tlb_entry_to_pa(tlb_entries[entry_idx]);
Expand Down Expand Up @@ -391,6 +401,11 @@ int sys_mm_drv_unmap_page(void *virt)
return ret;
}

int sys_mm_drv_unmap_page(void *virt)
{
return sys_mm_drv_unmap_page_wflush(virt, true);
}

int sys_mm_drv_unmap_region(void *virt, size_t size)
{
void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
Expand Down Expand Up @@ -441,6 +456,42 @@ int sys_mm_drv_update_page_flags(void *virt, uint32_t flags)
return ret;
}

#ifdef CONFIG_MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM
/**
 * Unmap an aligned region page by page during driver init, skipping
 * the data-cache flush (the region was never used, so there is no
 * dirty data worth writing back — see sys_mm_drv_unmap_page_wflush).
 *
 * @param virt_in Start of the region (uncached alias accepted; the
 *                cached alias is derived internally).
 * @param size    Region size in bytes; must be page-size aligned.
 * @return 0 on success, -EINVAL on misaligned input, or the last
 *         non-zero error reported while unmapping individual pages.
 */
static int sys_mm_drv_unmap_region_initial(void *virt_in, size_t size)
{
	/* Work on the cached alias of the region. */
	void *region_va = (__sparse_force void *)sys_cache_cached_ptr_get(virt_in);
	k_spinlock_key_t key;
	int status = 0;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(region_va) ||
		!sys_mm_drv_is_size_aligned(size)) {
		return -EINVAL;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (size_t off = 0; off < size; off += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *page_va = (uint8_t *)region_va + off;
		int rc = sys_mm_drv_unmap_page_wflush(page_va, false);

		if (rc != 0) {
			__ASSERT(false, "cannot unmap %p\n", page_va);

			/* Remember the failure but keep unmapping the rest. */
			status = rc;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

	return status;
}
#endif

int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys)
{
uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
Expand Down Expand Up @@ -721,8 +772,9 @@ static int sys_mm_drv_mm_init(const struct device *dev)
size_t unused_size = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE -
UNUSED_L2_START_ALIGNED;

ret = sys_mm_drv_unmap_region(UINT_TO_POINTER(UNUSED_L2_START_ALIGNED),
unused_size);
ret = sys_mm_drv_unmap_region_initial(UINT_TO_POINTER(UNUSED_L2_START_ALIGNED),
unused_size);


/* Need to reset max pages statistics after unmap */
for (int i = 0; i < L2_SRAM_BANK_NUM; i++) {
Expand Down

0 comments on commit 13550b6

Please sign in to comment.