diff --git a/.gitignore b/.gitignore index 62a952f6e..197464e51 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ backup/ -smp host.txt Image +opensbi-0.9/build-oe/ diff --git a/opensbi-1.2/include/sbi/riscv_locks.h b/opensbi-1.2/include/sbi/riscv_locks.h index 38d9cbeb7..f8629a5ed 100644 --- a/opensbi-1.2/include/sbi/riscv_locks.h +++ b/opensbi-1.2/include/sbi/riscv_locks.h @@ -10,6 +10,7 @@ #include +#define LOCK_DEBUG 0 #define TICKET_SHIFT 16 typedef struct { diff --git a/opensbi-1.2/include/sbi/sbi_console.h b/opensbi-1.2/include/sbi/sbi_console.h index e15b55dcb..91d94ccde 100644 --- a/opensbi-1.2/include/sbi/sbi_console.h +++ b/opensbi-1.2/include/sbi/sbi_console.h @@ -41,6 +41,8 @@ int __printf(3, 4) sbi_snprintf(char *out, u32 out_sz, const char *format, ...); int __printf(1, 2) sbi_printf(const char *format, ...); +int __printf(1, 2) sbi_printf_nolock(const char *format, ...); + int __printf(1, 2) sbi_dprintf(const char *format, ...); void __printf(1, 2) __attribute__((noreturn)) sbi_panic(const char *format, ...); diff --git a/opensbi-1.2/include/sbi/sbi_hart.h b/opensbi-1.2/include/sbi/sbi_hart.h index 95b40e759..b251a2f01 100644 --- a/opensbi-1.2/include/sbi/sbi_hart.h +++ b/opensbi-1.2/include/sbi/sbi_hart.h @@ -12,6 +12,8 @@ #include +#define MAX_HARTS 8 + /** Possible privileged specification versions of a hart */ enum sbi_hart_priv_versions { /** Unknown privileged specification */ diff --git a/opensbi-1.2/include/sbi/sbi_ipi.h b/opensbi-1.2/include/sbi/sbi_ipi.h index f6ac8072f..fae73c299 100644 --- a/opensbi-1.2/include/sbi/sbi_ipi.h +++ b/opensbi-1.2/include/sbi/sbi_ipi.h @@ -17,6 +17,11 @@ #define SBI_IPI_EVENT_MAX __riscv_xlen /* clang-format on */ +#define SYNC_DEBUG 0 +#define MAX_HARTS 8 +#define IPI_NONE 0 +#define IPI_TLB 1 +#define IPI_PMP 2 /** IPI hardware device */ struct sbi_ipi_device { diff --git a/opensbi-1.2/include/sm/enclave.h b/opensbi-1.2/include/sm/enclave.h index 090301370..0d0969ae5 100644 --- a/opensbi-1.2/include/sm/enclave.h +++ b/opensbi-1.2/include/sm/enclave.h @@ -65,6 +65,7 @@ struct enclave_t //shared mem with kernel unsigned long kbuffer; + unsigned long kbuffer_paddr; unsigned long kbuffer_size; unsigned long* ocall_func_id; @@ -74,6 +75,7 @@ struct enclave_t //shared memory with host unsigned long untrusted_ptr; + unsigned long untrusted_ptr_paddr; unsigned long untrusted_size; // enclave measurement unsigned char hash[HASH_SIZE]; @@ -91,7 +93,7 @@ struct cpu_state_t int eid; }; -uintptr_t create_enclave(struct enclave_sbi_param_t create_args); +uintptr_t create_enclave(struct enclave_sbi_param_t create_args, int retry); uintptr_t run_enclave(uintptr_t* regs, unsigned int eid); uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid); uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid); @@ -100,6 +102,7 @@ uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid); uintptr_t attest_enclave(uintptr_t eid, uintptr_t report_ptr, uintptr_t nonce); uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval); uintptr_t do_timer_irq(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc); +uintptr_t free_enclave_metadata(); uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid); uintptr_t enclave_sys_write(uintptr_t *regs); diff --git a/opensbi-1.2/include/sm/enclave_args.h b/opensbi-1.2/include/sm/enclave_args.h index f5309a0b4..7f53870aa 100644 --- a/opensbi-1.2/include/sm/enclave_args.h +++ b/opensbi-1.2/include/sm/enclave_args.h @@ -21,6 +21,13 @@ struct mm_alloc_arg_t unsigned long resp_size; }; +struct 
mm_reclaim_arg_t +{ + unsigned long req_size; + uintptr_t req_addr; + unsigned long resp_size; +}; + // Attestation-related report struct sm_report_t { @@ -66,15 +73,17 @@ struct signature_t */ struct enclave_sbi_param_t { - unsigned int *eid_ptr; + unsigned int * eid_ptr; unsigned long paddr; unsigned long size; unsigned long entry_point; unsigned long untrusted_ptr; + unsigned long untrusted_paddr; unsigned long untrusted_size; unsigned long free_mem; //enclave shared mem with kernel unsigned long kbuffer; + unsigned long kbuffer_paddr; unsigned long kbuffer_size; unsigned long *ecall_arg0; unsigned long *ecall_arg1; diff --git a/opensbi-1.2/include/sm/platform/pmp/enclave_mm.h b/opensbi-1.2/include/sm/platform/pmp/enclave_mm.h index 9a6d75d8b..cfcdb1ff4 100644 --- a/opensbi-1.2/include/sm/platform/pmp/enclave_mm.h +++ b/opensbi-1.2/include/sm/platform/pmp/enclave_mm.h @@ -75,6 +75,8 @@ void* mm_alloc(unsigned long req_size, unsigned long* resp_size); int mm_free(void* paddr, unsigned long size); +int memory_reclaim(unsigned long* resp_size); + int mm_free_clear(void* paddr, unsigned long size); void print_buddy_system(); diff --git a/opensbi-1.2/include/sm/sm.h b/opensbi-1.2/include/sm/sm.h index 2753bd59e..d4f1d8d5b 100644 --- a/opensbi-1.2/include/sm/sm.h +++ b/opensbi-1.2/include/sm/sm.h @@ -38,6 +38,7 @@ extern uintptr_t _fw_start[], _fw_end[]; #define SBI_GET_KEY 88 //Error code of SBI_ALLOC_ENCLAVE_MEM +#define RETRY_SPIN_LOCK -3 #define ENCLAVE_NO_MEMORY -2 #define ENCLAVE_ERROR -1 #define ENCLAVE_SUCCESS 0 @@ -58,6 +59,8 @@ extern uintptr_t _fw_start[], _fw_end[]; #define FREE_MAX_MEMORY 2 #define FREE_SPEC_MEMORY 3 +#define RETRY_TIMES 5 + void sm_init(); uintptr_t sm_mm_init(uintptr_t paddr, unsigned long size); @@ -68,7 +71,9 @@ uintptr_t sm_alloc_enclave_mem(uintptr_t mm_alloc_arg); uintptr_t sm_free_enclave_mem(uintptr_t size_ptr,unsigned long flag); -uintptr_t sm_create_enclave(uintptr_t enclave_create_args); +uintptr_t sm_memory_reclaim(uintptr_t enclave_id, unsigned long eid); + +uintptr_t sm_create_enclave(uintptr_t enclave_create_args, bool retry); uintptr_t sm_attest_enclave(uintptr_t enclave_id, uintptr_t report, uintptr_t nonce); diff --git a/opensbi-1.2/lib/sbi/riscv_locks.c b/opensbi-1.2/lib/sbi/riscv_locks.c index acab77692..1721f0721 100644 --- a/opensbi-1.2/lib/sbi/riscv_locks.c +++ b/opensbi-1.2/lib/sbi/riscv_locks.c @@ -7,6 +7,10 @@ #include #include +#include +#include +#define MAX_HARTS 8 +volatile long waiting_for_spinlock[MAX_HARTS] = { 0 }; static inline bool spin_lock_unlocked(spinlock_t lock) { @@ -47,10 +51,14 @@ bool spin_trylock(spinlock_t *lock) void spin_lock(spinlock_t *lock) { - unsigned long inc = 1u << TICKET_SHIFT; + //for lock debug + unsigned long inc = 1u << TICKET_SHIFT; unsigned long mask = 0xffffu; u32 l0, tmp1, tmp2; + ulong hartid = csr_read(CSR_MHARTID); + // mark this hart as waiting before it tries to acquire the lock + waiting_for_spinlock[hartid] = 1; __asm__ __volatile__( /* Atomically increment the next ticket. 
*/ " amoadd.w.aqrl %0, %4, %3\n" @@ -69,9 +77,13 @@ void spin_lock(spinlock_t *lock) : "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock) : "r"(inc), "r"(mask), "I"(TICKET_SHIFT) : "memory"); + + waiting_for_spinlock[hartid] = 0; } void spin_unlock(spinlock_t *lock) { + ulong hartid = csr_read(CSR_MHARTID); + waiting_for_spinlock[hartid] = 0; __smp_store_release(&lock->owner, lock->owner + 1); } diff --git a/opensbi-1.2/lib/sbi/sbi_ecall_penglai.c b/opensbi-1.2/lib/sbi/sbi_ecall_penglai.c index 3539c90e5..2834e36f0 100644 --- a/opensbi-1.2/lib/sbi/sbi_ecall_penglai.c +++ b/opensbi-1.2/lib/sbi/sbi_ecall_penglai.c @@ -12,16 +12,20 @@ #include #include #include +#include +// static spinlock_t sm_big_lock = SPIN_LOCK_INITIALIZER; static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long funcid, const struct sbi_trap_regs *regs, unsigned long *out_val, struct sbi_trap_info *out_trap) -{ +{ uintptr_t ret = 0; - printm("[Penglai KModule] %s invoked,funcid=%ld\r\n",__func__,funcid); + printm("[Penglai KModule@%u] %s invoked,funcid=%ld\r\n", + current_hartid(), __func__, funcid); //csr_write(CSR_MEPC, regs->mepc + 4); ((struct sbi_trap_regs *)regs)->mepc += 4; + // spin_lock(&sm_big_lock); switch (funcid) { // The following is the Penglai's Handler case SBI_MM_INIT: @@ -34,7 +38,7 @@ static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long fun ret = sm_alloc_enclave_mem(regs->a0); break; case SBI_CREATE_ENCLAVE: - ret = sm_create_enclave(regs->a0); + ret = sm_create_enclave(regs->a0, regs->a1); break; case SBI_RUN_ENCLAVE: ret = sm_run_enclave((uintptr_t *)regs, regs->a0); @@ -51,6 +55,9 @@ static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long fun case SBI_DESTROY_ENCLAVE: ret = sm_destroy_enclave((uintptr_t *)regs, regs->a0); break; + case SBI_MEMORY_RECLAIM: + ret=sm_memory_reclaim(regs->a0, regs->a1); + break; case SBI_FREE_ENCLAVE_MEM: ret= sm_free_enclave_mem(regs->a0, regs->a1); break; @@ -61,6 +68,9 @@ static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long fun //((struct sbi_trap_regs *)regs)->mepc = csr_read(CSR_MEPC); //((struct sbi_trap_regs *)regs)->mstatus = csr_read(CSR_MSTATUS); *out_val = ret; + // spin_unlock(&sm_big_lock); + printm("[Penglai KModule@%u] %s return %ld, funcid=%ld\r\n", + current_hartid(), __func__, ret, funcid); return ret; } @@ -75,25 +85,28 @@ static int sbi_ecall_penglai_enclave_handler(unsigned long extid, unsigned long struct sbi_trap_info *out_trap) { uintptr_t ret = 0; - + // spin_lock(&sm_big_lock); //csr_write(CSR_MEPC, regs->mepc + 4); ((struct sbi_trap_regs *)regs)->mepc += 4; - + printm("[Penglai KModule@%u] %s invoked,funcid=%ld\r\n", + current_hartid(), __func__, funcid); switch (funcid) { // The following is the Penglai's Handler - case SBI_EXIT_ENCLAVE: + case SBI_EXIT_ENCLAVE://99 ret = sm_exit_enclave((uintptr_t *)regs, regs->a0); break; - case SBI_ENCLAVE_OCALL: + case SBI_ENCLAVE_OCALL://98 ret = sm_enclave_ocall((uintptr_t *)regs, regs->a0, regs->a1, regs->a2); break; - case SBI_GET_KEY: + case SBI_GET_KEY://88 ret = sm_enclave_get_key((uintptr_t *)regs, regs->a0, regs->a1, regs->a2, regs->a3); break; default: sbi_printf("[Penglai@Monitor] enclave interface(funcid:%ld) not supported yet\n", funcid); ret = SBI_ENOTSUPP; } + printm("[Penglai KModule@%u] %s return %ld,funcid=%ld\r\n", current_hartid(), __func__,ret , funcid); + // spin_unlock(&sm_big_lock); *out_val = ret; return ret; } diff --git a/opensbi-1.2/lib/sbi/sbi_ipi.c b/opensbi-1.2/lib/sbi/sbi_ipi.c index 
7aafbbde5..5b40f53a0 100644 --- a/opensbi-1.2/lib/sbi/sbi_ipi.c +++ b/opensbi-1.2/lib/sbi/sbi_ipi.c @@ -23,6 +23,9 @@ #include #include +volatile unsigned long wait_for_sync[MAX_HARTS] = { IPI_NONE }; +volatile unsigned long skip_for_wait[MAX_HARTS][MAX_HARTS] = {{0}}; + struct sbi_ipi_data { unsigned long ipi_type; }; @@ -64,9 +67,10 @@ static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartid, atomic_raw_set_bit(event, &ipi_data->ipi_type); smp_wmb(); - if (ipi_dev && ipi_dev->ipi_send) - ipi_dev->ipi_send(remote_hartid); + if (ipi_dev && ipi_dev->ipi_send) { + ipi_dev->ipi_send(remote_hartid); + } sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_SENT); if (ipi_ops->sync) diff --git a/opensbi-1.2/lib/sbi/sbi_pmp.c b/opensbi-1.2/lib/sbi/sbi_pmp.c index 4c670d6e6..a1b4be64b 100644 --- a/opensbi-1.2/lib/sbi/sbi_pmp.c +++ b/opensbi-1.2/lib/sbi/sbi_pmp.c @@ -14,8 +14,15 @@ #include #include +#define MAGIC_NUM 30; +extern volatile unsigned long waiting_for_spinlock[MAX_HARTS]; +extern volatile unsigned long wait_for_sync[MAX_HARTS]; +extern volatile int skip_for_wait[MAX_HARTS][MAX_HARTS]; //slot: mark which rhart no reply +extern volatile int print_m_mode; static unsigned long pmp_data_offset; static unsigned long pmp_sync_offset; +static volatile u32 curr_skip_hartid =-1; //0:cur_remotehartid, 1:skip_hartid + static void sbi_process_pmp(struct sbi_scratch *scratch) { @@ -27,12 +34,24 @@ static void sbi_process_pmp(struct sbi_scratch *scratch) int pmp_idx = data->pmp_idx_arg; set_pmp(pmp_idx, pmp_config); + ulong hartid = csr_read(CSR_MHARTID); //sync sbi_hartmask_for_each_hart(rhartid, &data->smask) { rscratch = sbi_hartid_to_scratch(rhartid); if (!rscratch) continue; + if(print_m_mode && SYNC_DEBUG) sbi_printf("hart %ld process sync pmp\n", hartid); pmp_sync = sbi_scratch_offset_ptr(rscratch, pmp_sync_offset); + if (skip_for_wait[rhartid][hartid] == 1) + { + *pmp_sync = 0; + if (SYNC_DEBUG) + sbi_printf("hart %ld no reply sync_pmp to %d\n", + hartid, rhartid); + skip_for_wait[rhartid][hartid] = 0; + continue; + } + while (atomic_raw_xchg_ulong(pmp_sync, 1)); } } @@ -53,6 +72,8 @@ static int sbi_update_pmp(struct sbi_scratch *scratch, return -1; } + wait_for_sync[curr_hartid] = IPI_PMP; + curr_skip_hartid = remote_hartid; pmp_data = sbi_scratch_offset_ptr(remote_scratch, pmp_data_offset); //update the remote hart pmp data sbi_memcpy(pmp_data, data, sizeof(struct pmp_data_t)); @@ -63,9 +84,54 @@ static int sbi_update_pmp(struct sbi_scratch *scratch, static void sbi_pmp_sync(struct sbi_scratch *scratch) { unsigned long *pmp_sync = - sbi_scratch_offset_ptr(scratch, pmp_sync_offset); - //wait the remote hart process the pmp signal - while (!atomic_raw_xchg_ulong(pmp_sync, 0)); + sbi_scratch_offset_ptr(scratch, pmp_sync_offset); + ulong hartid = csr_read(CSR_MHARTID); + wait_for_sync[hartid] = IPI_PMP; + + u32 remote_hartid = curr_skip_hartid; + + if (remote_hartid != -1 && (wait_for_sync[remote_hartid] == IPI_TLB || waiting_for_spinlock[remote_hartid] == 1)){ + if (SYNC_DEBUG) + sbi_printf("hart %ld skip wait %u sync pmp\n", hartid, + remote_hartid); + curr_skip_hartid = -1; + atomic_raw_xchg_ulong(pmp_sync, 0); + skip_for_wait[hartid][remote_hartid] = 1; + } else { + if (SYNC_DEBUG) + sbi_printf("hart %ld wait %d sync pmp\n", hartid, + curr_skip_hartid); + //wait the remote hart process the pmp signal + int retry = MAGIC_NUM; + while (!atomic_raw_xchg_ulong(pmp_sync, 0)) { + /** + * This is used to handle the situation + * where the remote enters m mode before + * sending pmp sync, and the 
remote is in spin + * lock after sending ipi. + */ + retry--; + if (retry == 0) { + retry = MAGIC_NUM; + if (remote_hartid != -1 && + (wait_for_sync[remote_hartid] == IPI_TLB || + waiting_for_spinlock[remote_hartid] == 1)) { + if (SYNC_DEBUG) + sbi_printf( + "hart %ld skip wait %u sync pmp\n", + hartid, remote_hartid); + curr_skip_hartid = -1; + atomic_raw_xchg_ulong(pmp_sync, 0); + skip_for_wait[hartid][remote_hartid] = 1; + break; + } + } + if (SYNC_DEBUG) + sbi_printf("hart %ld wait %u sync pmp\n", + hartid, remote_hartid); + }; + } + wait_for_sync[hartid] = IPI_NONE; return; } @@ -79,7 +145,11 @@ static struct sbi_ipi_event_ops pmp_ops = { static u32 pmp_event = SBI_IPI_EVENT_MAX; int sbi_send_pmp(ulong hmask, ulong hbase, struct pmp_data_t* pmp_data) -{ +{ + ulong hartid = csr_read(CSR_MHARTID); + wait_for_sync[hartid] = IPI_PMP; + if (SYNC_DEBUG) + sbi_printf("hart %ld begin sync pmp\n", hartid); return sbi_ipi_send_many(hmask, hbase, pmp_event, pmp_data); } diff --git a/opensbi-1.2/lib/sbi/sbi_tlb.c b/opensbi-1.2/lib/sbi/sbi_tlb.c index 4c142ea3e..96dce3f67 100644 --- a/opensbi-1.2/lib/sbi/sbi_tlb.c +++ b/opensbi-1.2/lib/sbi/sbi_tlb.c @@ -23,6 +23,9 @@ #include #include +extern volatile unsigned long wait_for_sync[MAX_HARTS]; +extern volatile int print_m_mode; + static unsigned long tlb_sync_off; static unsigned long tlb_fifo_off; static unsigned long tlb_fifo_mem_off; @@ -260,6 +263,10 @@ static void tlb_sync(struct sbi_scratch *scratch) unsigned long *tlb_sync = sbi_scratch_offset_ptr(scratch, tlb_sync_off); + ulong hartid = csr_read(CSR_MHARTID); + wait_for_sync[hartid] = IPI_TLB; + + while (!atomic_raw_xchg_ulong(tlb_sync, 0)) { /* * While we are waiting for remote hart to set the sync, @@ -268,6 +275,9 @@ static void tlb_sync(struct sbi_scratch *scratch) tlb_process_count(scratch, 1); } +// no_wait: + wait_for_sync[hartid] = IPI_NONE; + if(print_m_mode && SYNC_DEBUG) sbi_printf("hart %ld wait sync_tlb success!\n", hartid); return; } @@ -365,6 +375,7 @@ static int tlb_update(struct sbi_scratch *scratch, tinfo->local_fn(tinfo); return -1; } + wait_for_sync[curr_hartid] = IPI_TLB; tlb_fifo_r = sbi_scratch_offset_ptr(remote_scratch, tlb_fifo_off); @@ -372,7 +383,8 @@ static int tlb_update(struct sbi_scratch *scratch, if (ret != SBI_FIFO_UNCHANGED) { return 1; } - + ulong hartid = csr_read(CSR_MHARTID); + if(SYNC_DEBUG) sbi_printf("hart %ld begin wait %d sync_tlb\n", hartid, remote_hartid); while (sbi_fifo_enqueue(tlb_fifo_r, data) < 0) { /** * For now, Busy loop until there is space in the fifo. 
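Editor's note on the sbi_pmp.c hunks above: sbi_pmp_sync() now uses a bounded wait. The requesting hart polls the ack word, and every MAGIC_NUM iterations it checks whether the target hart is itself blocked (waiting on a TLB sync or spinning on a lock); if so, it stops waiting, clears the ack word, and records skip_for_wait[waiter][responder] so that the responder later skips replying in sbi_process_pmp(). (Note that `#define MAGIC_NUM 30;` carries a trailing semicolon; it only works because the macro is always expanded as a complete initializer or assignment.) The following is a minimal stand-alone sketch of that waiter-side idea; the names, the simulated per-hart state, and the demo in main() are illustrative assumptions, not the monitor's real API.

/*
 * Illustrative sketch (not monitor code): the waiter polls an ack flag but
 * gives up when the remote hart is itself blocked, breaking the circular wait.
 * All names and the simulated state here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_HARTS 8
#define MAGIC_NUM 30

static volatile unsigned long pmp_ack[MAX_HARTS];   /* remote sets this once it applied the PMP update */
static volatile int remote_blocked[MAX_HARTS];      /* remote is waiting on us / spinning on a lock */
static int skip_for_wait[MAX_HARTS][MAX_HARTS];     /* [waiter][responder]: responder must not ack */

/* Waiter side: hart `local` waits for hart `remote` to acknowledge a PMP update. */
static bool wait_pmp_ack(int local, int remote)
{
    int retry = MAGIC_NUM;

    while (!pmp_ack[remote]) {
        if (--retry == 0) {
            retry = MAGIC_NUM;
            if (remote_blocked[remote]) {
                /* remote cannot make progress: stop waiting and tell it
                 * to skip the reply when it finally processes the IPI */
                skip_for_wait[local][remote] = 1;
                return false;
            }
        }
    }
    pmp_ack[remote] = 0;    /* consume the ack */
    return true;
}

int main(void)
{
    /* hart 1 is blocked, so hart 0 gives up instead of spinning forever */
    remote_blocked[1] = 1;
    printf("acked: %d, skipped: %d\n", wait_pmp_ack(0, 1), skip_for_wait[0][1]);
    return 0;
}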
@@ -405,7 +417,9 @@ int sbi_tlb_request(ulong hmask, ulong hbase, struct sbi_tlb_info *tinfo) return SBI_EINVAL; tlb_pmu_incr_fw_ctr(tinfo); - + ulong hartid = csr_read(CSR_MHARTID); + if(print_m_mode && SYNC_DEBUG) sbi_printf("hart %ld begin sync tlb\n", hartid); + wait_for_sync[hartid] = IPI_TLB; return sbi_ipi_send_many(hmask, hbase, tlb_event, tinfo); } diff --git a/opensbi-1.2/lib/sbi/sbi_trap.c b/opensbi-1.2/lib/sbi/sbi_trap.c index f6ebcaf60..4a7ff93af 100644 --- a/opensbi-1.2/lib/sbi/sbi_trap.c +++ b/opensbi-1.2/lib/sbi/sbi_trap.c @@ -25,6 +25,9 @@ #include +int m_mode_status[MAX_HARTS]; +volatile int print_m_mode = 0; + static void __noreturn sbi_trap_error(const char *msg, int rc, ulong mcause, ulong mtval, ulong mtval2, ulong mtinst, struct sbi_trap_regs *regs) @@ -274,6 +277,8 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) mtval2 = csr_read(CSR_MTVAL2); mtinst = csr_read(CSR_MTINST); } + int hartid = csr_read(CSR_MHARTID); + m_mode_status[hartid] = 1; if (mcause & (1UL << (__riscv_xlen - 1))) { mcause &= ~(1UL << (__riscv_xlen - 1)); @@ -296,7 +301,9 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) msg = "unhandled external interrupt"; goto trap_error; }; - return regs; + hartid = csr_read(CSR_MHARTID); + m_mode_status[hartid] = 0; + return regs; } switch (mcause) { @@ -323,6 +330,7 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) } case CAUSE_SUPERVISOR_ECALL: case CAUSE_MACHINE_ECALL: + rc = sbi_ecall_handler(regs); msg = "ecall handler failed"; break; @@ -347,6 +355,8 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) trap_error: if (rc) sbi_trap_error(msg, rc, mcause, mtval, mtval2, mtinst, regs); + hartid = csr_read(CSR_MHARTID); + m_mode_status[hartid] = 0; return regs; } diff --git a/opensbi-1.2/lib/sbi/sm/enclave.c b/opensbi-1.2/lib/sbi/sm/enclave.c index 1eb31e35f..b7303151a 100644 --- a/opensbi-1.2/lib/sbi/sm/enclave.c +++ b/opensbi-1.2/lib/sbi/sm/enclave.c @@ -12,7 +12,7 @@ #include static struct cpu_state_t cpus[MAX_HARTS] = {{0,}, }; - +static volatile int eids_num = 0; //spinlock static spinlock_t enclave_metadata_lock = SPIN_LOCK_INITIALIZER; @@ -20,6 +20,31 @@ static spinlock_t enclave_metadata_lock = SPIN_LOCK_INITIALIZER; struct link_mem_t* enclave_metadata_head = NULL; struct link_mem_t* enclave_metadata_tail = NULL; +void acquire_big_metadata_lock(const char * str) +{ + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s try lock\n", __func__, current_hartid(), str); + spin_lock(&enclave_metadata_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s get lock\n", __func__, current_hartid(), str); +} +bool try_big_metadata_lock(const char * str) +{ + //spin_lock(&enclave_metadata_lock); + bool res; + res=spin_trylock(&enclave_metadata_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s try get lock,res=%d\n", __func__, current_hartid(), str, res); + return res; +} + +void release_big_metadata_lock(const char *str) +{ + spin_unlock(&enclave_metadata_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s %d] %s release lock\n", __func__, current_hartid(), str); +} + static void enter_enclave_world(int eid) { cpus[csr_read(CSR_MHARTID)].in_enclave = ENCLAVE_MODE; @@ -113,7 +138,7 @@ struct link_mem_t* add_link_mem(struct link_mem_t** tail) return new_link_mem; } -int remove_link_mem(struct link_mem_t** head, struct link_mem_t* ptr,bool clear) +int remove_link_mem(struct link_mem_t** head, struct link_mem_t* ptr, bool clear) { struct link_mem_t *cur_link_mem, *tmp_link_mem; int 
retval =0; @@ -161,7 +186,7 @@ static struct enclave_t* alloc_enclave() struct enclave_t* enclave = NULL; int i, found, eid; - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); //enclave metadata list hasn't be initialized yet if(enclave_metadata_head == NULL) @@ -211,19 +236,75 @@ static struct enclave_t* alloc_enclave() enclave->state = FRESH; enclave->eid = eid; } + //add to eids_num for recycle + eids_num += 1; alloc_eid_out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return enclave; } -static int free_enclave(int eid, bool clear) +uintptr_t free_enclave_metadata() +{ + int ret_val; + int eids_tmp; + struct link_mem_t *cur; + // struct enclave_t *enclave = NULL; + printm("[Penglai Monitor@%s] invoked\n", __func__); + ret_val = 0; + eids_tmp = eids_num; + for (cur = enclave_metadata_head; cur != NULL; + cur = cur->next_link_mem) { + struct enclave_t *check_enclave = NULL; + for (size_t i = 0; i < cur->slab_num; i++) { + check_enclave = (struct enclave_t *)(cur->addr) + i; + if (!check_enclave) { + printm("[Penglai Monitor@%s] don't have enclave_metadata in cur\r\n", + __func__); + ret_val = -1; + return ret_val; + }; + if (check_enclave->state == FRESH) { + break; + } + + if (check_enclave->state == INVALID) { + if (check_enclave->eid == (unsigned int)(-1)) { + eids_tmp--; + } + //all eids or eids in cur are invalid, free cur + if ((i == (cur->slab_num - 1)) || + eids_tmp == 0) { + struct link_mem_t *tmp = cur; + cur = cur->next_link_mem; + remove_link_mem(&enclave_metadata_head, + tmp, 1); + eids_num = eids_tmp; + break; + } else { + continue; + } + } else if (check_enclave->state != INVALID) { + printm("[Penglai Monitor@%s]enclave %ld is in use\n", + __func__, i); + break; + } + } + if (cur == NULL) { + break; + } + } + print_buddy_system(); + return 0; +} + +static int free_enclave(int eid) { struct link_mem_t *cur; struct enclave_t *enclave = NULL; int found, count, ret_val; - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); found = 0; count = 0; @@ -234,22 +315,22 @@ static int free_enclave(int eid, bool clear) enclave = (struct enclave_t*)(cur->addr) + (eid - count); sbi_memset((void*)enclave, 0, sizeof(struct enclave_t)); enclave->state = INVALID; - found = 1; - ret_val = 0; - remove_link_mem(&enclave_metadata_head,cur,clear); + enclave->eid = -1; + found = 1; + ret_val = 0; break; } count += cur->slab_num; } //haven't alloc this eid - if(!found) - { - printm("[Penglai Monitor@%s] haven't alloc this eid\r\n", __func__); + if (!found) { + printm("[Penglai Monitor@%s] haven't alloc this eid %d\r\n", + __func__, eid); ret_val = -1; } - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return ret_val; } @@ -260,7 +341,7 @@ struct enclave_t* get_enclave(int eid) struct enclave_t *enclave; int found, count; - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); found = 0; count = 0; @@ -283,12 +364,13 @@ struct enclave_t* get_enclave(int eid) enclave = NULL; } - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return enclave; } int swap_from_host_to_enclave(uintptr_t* host_regs, struct enclave_t* enclave) -{ +{ + printm("[Penglai Monitor@%s %d] swap_from_host_to_enclave\n", __func__, current_hartid()); //grant encalve access to memory if(grant_enclave_access(enclave) < 0) return -1; @@ -344,6 +426,7 @@ int swap_from_host_to_enclave(uintptr_t* host_regs, struct enclave_t* enclave) int swap_from_enclave_to_host(uintptr_t* regs, 
struct enclave_t* enclave) { + printm("[Penglai Monitor@%s] swap_from_enclave_to_host\n", __func__); //retrieve enclave access to memory retrieve_enclave_access(enclave); @@ -388,7 +471,7 @@ int swap_from_enclave_to_host(uintptr_t* regs, struct enclave_t* enclave) return 0; } -uintptr_t create_enclave(struct enclave_sbi_param_t create_args) +uintptr_t create_enclave(struct enclave_sbi_param_t create_args, bool retry) { struct enclave_t* enclave; unsigned int eid; @@ -398,26 +481,34 @@ uintptr_t create_enclave(struct enclave_sbi_param_t create_args) if(!enclave) { printm("[Penglai Monitor@%s] enclave allocation is failed \r\n", __func__); - sbi_memset((void*)(create_args.paddr), 0, create_args.size); - mm_free((void*)(create_args.paddr), create_args.size); - return ENCLAVE_ERROR; + + if (retry) + { + printm("[Penglai Monitor@%s] retry failed, clear enclave \r\n", __func__); + sbi_memset((void*)(create_args.paddr), 0, create_args.size); + mm_free((void*)(create_args.paddr), create_args.size); + } + + return ENCLAVE_NO_MEMORY; } - spin_lock(&enclave_metadata_lock); + // acquire_big_metadata_lock(__func__); eid = enclave->eid; enclave->paddr = create_args.paddr; enclave->size = create_args.size; enclave->entry_point = create_args.entry_point; enclave->untrusted_ptr = create_args.untrusted_ptr; - enclave->untrusted_size = create_args.untrusted_size; + enclave->untrusted_ptr_paddr = create_args.untrusted_paddr; + enclave->untrusted_size = create_args.untrusted_size; enclave->free_mem = create_args.free_mem; enclave->ocall_func_id = create_args.ecall_arg0; enclave->ocall_arg0 = create_args.ecall_arg1; enclave->ocall_arg1 = create_args.ecall_arg2; enclave->ocall_syscall_num = create_args.ecall_arg3; enclave->kbuffer = create_args.kbuffer; - enclave->kbuffer_size = create_args.kbuffer_size; + enclave->kbuffer_paddr = create_args.kbuffer_paddr; + enclave->kbuffer_size = create_args.kbuffer_size; enclave->host_ptbr = csr_read(CSR_SATP); enclave->thread_context.encl_ptbr = (create_args.paddr >> (RISCV_PGSHIFT) | SATP_MODE_CHOICE); enclave->root_page_table = (unsigned long*)create_args.paddr; @@ -442,6 +533,7 @@ uintptr_t create_enclave(struct enclave_sbi_param_t create_args) // TODO: verify hash and whitelist check // Check page table mapping secure and not out of bound + //put it in run_enclave for debug retval = check_enclave_pt(enclave); if(retval != 0) { @@ -459,7 +551,8 @@ uintptr_t create_enclave(struct enclave_sbi_param_t create_args) printm("[Penglai Monitor@%s] return eid:%d\n", __func__, enclave->eid); - spin_unlock(&enclave_metadata_lock); + //spin_unlock(&enclave_metadata_lock); + // release_big_metadata_lock(__func__); return 0; /* @@ -470,10 +563,11 @@ uintptr_t create_enclave(struct enclave_sbi_param_t create_args) sbi_memset((void*)(enclave->paddr), 0, enclave->size); mm_free((void*)(enclave->paddr), enclave->size); - spin_unlock(&enclave_metadata_lock); + //spin_unlock(&enclave_metadata_lock); + // release_big_metadata_lock(__func__); //free enclave struct - free_enclave(eid,0); //the enclave state will be set INVALID here + free_enclave(eid); //the enclave state will be set INVALID here return ENCLAVE_ERROR; } @@ -488,8 +582,13 @@ uintptr_t run_enclave(uintptr_t* regs, unsigned int eid) printm_err("[Penglai Monitor@%s] wrong enclave id\r\n", __func__); return -1UL; } - - spin_lock(&enclave_metadata_lock); +#if 0 + printm("[Penglai@%s], check PT for run enclave\n", __func__); + // dump_pt(enclave->root_page_table, 1); + retval = check_enclave_pt(enclave); +#endif + 
//spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if (enclave->state != FRESH) { @@ -528,7 +627,8 @@ uintptr_t run_enclave(uintptr_t* regs, unsigned int eid) enclave->state = RUNNING; run_enclave_out: - spin_unlock(&enclave_metadata_lock); + //spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return retval; } @@ -542,7 +642,8 @@ uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid) return -1UL; } - spin_lock(&enclave_metadata_lock); + //spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(enclave->host_ptbr != csr_read(CSR_SATP)) { @@ -569,7 +670,8 @@ uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid) enclave->state = STOPPED; stop_enclave_out: - spin_unlock(&enclave_metadata_lock); + //spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return retval; } @@ -583,7 +685,8 @@ uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid) return -1UL; } - spin_lock(&enclave_metadata_lock); + //spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if (enclave->host_ptbr != csr_read(CSR_SATP)) { @@ -610,10 +713,10 @@ uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid) mm_free((void*)(enclave->paddr), enclave->size); enclave->state = INVALID; - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); //free enclave struct - retval = free_enclave(eid,0); //the enclave state will be set INVALID here + retval = free_enclave(eid); //the enclave state will be set INVALID here return retval; } //FIXME: what if the enclave->state is RUNNABLE now? @@ -621,7 +724,7 @@ uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid) /* The real-destroy happen when the enclave traps into the monitor */ enclave->state = DESTROYED; out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return retval; } @@ -636,7 +739,7 @@ uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid) return -1UL; } - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(enclave->host_ptbr != csr_read(CSR_SATP)) { printm("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__); @@ -655,7 +758,7 @@ uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid) printm("[Penglai Monitor@%s] encalve-%d turns to runnable now!\n", __func__, eid); resume_from_stop_out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return retval; } @@ -669,7 +772,7 @@ uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid) return -1UL; } - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(enclave->host_ptbr != csr_read(CSR_SATP)) { @@ -691,7 +794,7 @@ uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid) spin_unlock(&enclave_metadata_lock); //free enclave struct - free_enclave(eid,0); //the enclave state will be set INVALID here + free_enclave(eid); //the enclave state will be set INVALID here return ENCLAVE_SUCCESS; //this will break the infinite loop in the enclave-driver } @@ -717,7 +820,7 @@ uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid) retval = regs[10]; resume_enclave_out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return retval; } @@ -727,7 +830,7 @@ uintptr_t attest_enclave(uintptr_t eid, uintptr_t report_ptr, uintptr_t nonce) int attestable = 1; struct report_t report; enclave = get_enclave(eid); - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(!attestable) { @@ -746,7 
+849,7 @@ uintptr_t attest_enclave(uintptr_t eid, uintptr_t report_ptr, uintptr_t nonce) copy_to_host((void*)report_ptr, (void*)(&report), sizeof(struct report_t)); - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return 0; } @@ -759,30 +862,37 @@ uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval) printm_err("[Penglai Monitor@%s] cpu is not in enclave world now\r\n", __func__); return -1; } - printm_err("[Penglai Monitor@%s] retval of enclave is %lx\r\n", __func__, retval); + printm("[Penglai Monitor@%s] retval of enclave is %lx\r\n", __func__, retval); enclave = get_enclave(eid); - spin_lock(&enclave_metadata_lock); + //spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING) { printm_err("[Penglai Monitor@%s] current enclave's eid is not %d\r\n", __func__, eid); - spin_unlock(&enclave_metadata_lock); + //spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return -1UL; } swap_from_enclave_to_host(regs, enclave); + printm("[Penglai Monitor@%s] untreasted mem:%lx ,val:%s \n\t kbuffer_addr:%lx val:%s \n", + __func__, enclave->untrusted_ptr, + (unsigned char *)(enclave->untrusted_ptr_paddr), enclave->kbuffer_paddr, + (unsigned char *)(enclave->kbuffer_paddr)); //free enclave's memory //TODO: support multiple memory region sbi_memset((void*)(enclave->paddr), 0, enclave->size); - mm_free((void*)(enclave->paddr), enclave->size); - - spin_unlock(&enclave_metadata_lock); + // mm_free((void*)(enclave->paddr), enclave->size); + // Optional: used to reclaim secmem immediately upon enclave exit + mm_free_clear((void*)(enclave->paddr), enclave->size); + release_big_metadata_lock(__func__); //free enclave struct - free_enclave(eid, 0); + free_enclave(eid); return 0; } @@ -791,15 +901,17 @@ uintptr_t enclave_sys_write(uintptr_t* regs) uintptr_t ret = 0; int eid = get_enclave_id(); struct enclave_t* enclave = NULL; + if(check_in_enclave_world() < 0) { printm_err("[Penglai Monitor@%s] check enclave world is failed\n", __func__); return -1; } - enclave = get_enclave(eid); + enclave = get_enclave(eid); + printm("[Penglai Monitor@%s] untreasted mem:%lx ,val:%s \n\t kbuffer_addr:%lx val:%s \n",__func__, enclave->untrusted_ptr_paddr, (unsigned char*)(enclave->untrusted_ptr_paddr), enclave->kbuffer_paddr, (unsigned char*)(enclave->kbuffer_paddr)); - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING) { @@ -815,7 +927,7 @@ uintptr_t enclave_sys_write(uintptr_t* regs) enclave->state = RUNNABLE; ret = ENCLAVE_OCALL; out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return ret; } @@ -845,7 +957,7 @@ uintptr_t enclave_derive_seal_key(uintptr_t* regs, uintptr_t salt_va, uintptr_t enclave = get_enclave(eid); - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(!enclave || check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING) { @@ -878,7 +990,7 @@ uintptr_t enclave_derive_seal_key(uintptr_t* regs, uintptr_t salt_va, uintptr_t } out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return ret; } @@ -895,7 +1007,7 @@ uintptr_t enclave_user_defined_ocall(uintptr_t* regs, uintptr_t ocall_buf_size) enclave = get_enclave(eid); - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); if(!enclave || 
check_enclave_authentication(enclave)!=0 || enclave->state != RUNNING) { @@ -912,7 +1024,7 @@ uintptr_t enclave_user_defined_ocall(uintptr_t* regs, uintptr_t ocall_buf_size) enclave->state = RUNNABLE; ret = ENCLAVE_OCALL; out: - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); return ret; } @@ -936,7 +1048,7 @@ uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) return -1UL; } - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); /* * An enclave trapping into monitor should not have other states. @@ -954,10 +1066,10 @@ uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) sbi_memset((void*)(enclave->paddr), 0, enclave->size); mm_free((void*)(enclave->paddr), enclave->size); - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); //free enclave struct - retval = free_enclave(eid,0); //the enclave state will be set INVALID here + retval = free_enclave(eid); //the enclave state will be set INVALID here retval = ENCLAVE_SUCCESS; //this means we will not run any more goto timer_irq_out; @@ -969,7 +1081,7 @@ uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) retval = ENCLAVE_TIMER_IRQ; } - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); timer_irq_out: csr_read_clear(CSR_MIE, MIP_MTIP); @@ -992,7 +1104,7 @@ uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid) return -1UL; } - spin_lock(&enclave_metadata_lock); + acquire_big_metadata_lock(__func__); switch(ocall_func_id) { @@ -1007,7 +1119,7 @@ uintptr_t resume_from_ocall(uintptr_t* regs, unsigned int eid) break; } - spin_unlock(&enclave_metadata_lock); + release_big_metadata_lock(__func__); retval = resume_enclave(regs, eid); return retval; diff --git a/opensbi-1.2/lib/sbi/sm/platform/pmp/enclave_mm.c b/opensbi-1.2/lib/sbi/sm/platform/pmp/enclave_mm.c index 232f79903..a7bfd7c81 100644 --- a/opensbi-1.2/lib/sbi/sm/platform/pmp/enclave_mm.c +++ b/opensbi-1.2/lib/sbi/sm/platform/pmp/enclave_mm.c @@ -18,10 +18,43 @@ * TODO: this array can be removed as we can get * existing enclave regions via pmp registers */ -static struct mm_region_t mm_regions[N_PMP_REGIONS]; -static unsigned long pmp_bitmap = 0; +// static struct mm_region_t mm_regions[N_PMP_REGIONS]; +struct mm_region_t mm_regions[N_PMP_REGIONS]; +volatile unsigned long pmp_bitmap = 0; static spinlock_t pmp_bitmap_lock = SPIN_LOCK_INITIALIZER; +void acquire_big_emem_lock(const char *str) +{ + if (LOCK_DEBUG) + printm("[PENGLAI SM @%s_%d] %s try lock\n", __func__, + current_hartid(), str); + spin_lock(&pmp_bitmap_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM @%s_%d] %s get lock\n", __func__, + current_hartid(), str); +} +int try_big_emem_lock(const char *str) +{ + if (LOCK_DEBUG) + printm("[PENGLAI SM @%s_%d] %s try lock\n", __func__, + current_hartid(), str); + if (!spin_trylock(&pmp_bitmap_lock)) + { + return RETRY_SPIN_LOCK; + } + if (LOCK_DEBUG) + printm("[PENGLAI SM @%s_%d] %s get lock\n", __func__, + current_hartid(), str); + return 0; +} + +void release_big_emem_lock(const char *str) +{ + spin_unlock(&pmp_bitmap_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s release lock\n", __func__, + current_hartid(), str); +} int check_mem_overlap(uintptr_t paddr, unsigned long size) { @@ -43,7 +76,7 @@ int check_mem_overlap(uintptr_t paddr, unsigned long size) && region_overlap(mm_regions[region_idx].paddr, mm_regions[region_idx].size, paddr, size)) { - printm_err("pmp memory overlaps with existing pmp memory!\r\n"); + 
printm_err("pmp memory overlaps with existing pmp memory!region_idx:%d\r\n", region_idx); return -1; } } @@ -60,7 +93,18 @@ uintptr_t copy_from_host(void* dest, void* src, size_t size) { int retval = -1; //get lock to prevent TOCTTOU - spin_lock(&pmp_bitmap_lock); + // acquire_big_emem_lock(__func__); + retval=try_big_emem_lock(__func__); + + int re_try = RETRY_TIMES; + while (retval == RETRY_SPIN_LOCK && re_try) { + re_try--; + retval=try_big_emem_lock(__func__); + } + if (retval == RETRY_SPIN_LOCK) + { + return retval; + } //check data is nonsecure //prevent coping from memory in secure region @@ -70,14 +114,16 @@ uintptr_t copy_from_host(void* dest, void* src, size_t size) retval = 0; } - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return retval; } uintptr_t copy_to_host(void* dest, void* src, size_t size) { int retval = -1; - spin_lock(&pmp_bitmap_lock); + // spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); //check data is nonsecure //prevent coping from memory in secure region @@ -87,14 +133,16 @@ uintptr_t copy_to_host(void* dest, void* src, size_t size) retval = 0; } - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return retval; } int copy_word_to_host(unsigned int* ptr, uintptr_t value) { int retval = -1; - spin_lock(&pmp_bitmap_lock); + // spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); //check data is nonsecure //prevent coping from memory in secure region @@ -104,7 +152,8 @@ int copy_word_to_host(unsigned int* ptr, uintptr_t value) retval = 0; } - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return retval; } @@ -340,7 +389,7 @@ uintptr_t copy_from_enclave(pte_t *enclave_root_pt, void* dest_pa, void* src_enc uintptr_t left_size = size; uintptr_t copy_size; if (page_left >= left_size) { - // do copy + // do copy in one time copy_size = left_size; src_pa = get_enclave_paddr_from_va(enclave_root_pt, (uintptr_t)src_enclave_va); if(src_pa == 0) @@ -495,7 +544,7 @@ int grant_kernel_access(void* req_paddr, unsigned long size) pmp_config.size = size; pmp_config.perm = PMP_R | PMP_W | PMP_X; pmp_config.mode = PMP_A_NAPOT; - set_pmp_and_sync(pmp_idx, pmp_config); + set_pmp(pmp_idx, pmp_config); return 0; } @@ -518,7 +567,8 @@ int retrieve_kernel_access(void* req_paddr, unsigned long size) return -1; } - clear_pmp_and_sync(pmp_idx); + // clear_pmp_and_sync(pmp_idx); + clear_pmp(pmp_idx); return 0; } @@ -535,9 +585,9 @@ int grant_enclave_access(struct enclave_t* enclave) //set pmp permission, ensure that enclave's paddr and size is pmp legal //TODO: support multiple memory regions - spin_lock(&pmp_bitmap_lock); - for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx) - { + // spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); + for (region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx) { if(mm_regions[region_idx].valid && region_contain( mm_regions[region_idx].paddr, mm_regions[region_idx].size, enclave->paddr, enclave->size)) @@ -545,7 +595,8 @@ int grant_enclave_access(struct enclave_t* enclave) break; } } - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); if(region_idx >= N_PMP_REGIONS) { @@ -595,9 +646,9 @@ int retrieve_enclave_access(struct enclave_t *enclave) //set pmp permission, ensure that enclave's paddr and size is pmp legal //TODO: support multiple memory regions - spin_lock(&pmp_bitmap_lock); - 
for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx) - { + // spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); + for (region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx) { if(mm_regions[region_idx].valid && region_contain( mm_regions[region_idx].paddr, mm_regions[region_idx].size, enclave->paddr, enclave->size)) @@ -605,7 +656,8 @@ int retrieve_enclave_access(struct enclave_t *enclave) break; } } - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); if(region_idx >= N_PMP_REGIONS) { @@ -648,9 +700,13 @@ uintptr_t mm_init(uintptr_t paddr, unsigned long size) return -1UL; //acquire a free enclave region - spin_lock(&pmp_bitmap_lock); + // spin_lock(&pmp_bitmap_lock); + // acquire_big_emem_lock(__func__); + if (try_big_emem_lock(__func__)) + { + return RETRY_SPIN_LOCK; + } - //check memory overlap //memory overlap should be checked after acquire lock if(check_mem_overlap(paddr, size) < 0) { @@ -698,7 +754,8 @@ uintptr_t mm_init(uintptr_t paddr, unsigned long size) mm_regions[region_idx].mm_list_head = mm_list_head; out: - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return retval; } @@ -902,6 +959,13 @@ static int insert_mm_region(int region_idx, struct mm_list_t* mm_region, int mer } //found the exact mm_list + + if(merge && mm_regions[region_idx].mm_list_head == mm_list_head && mm_list_head->mm_list == mm_region){ + //An entire pmp region is reclaimed + if(mm_region->order == mm_list_head->order){ + return 0; + } + } int ret_val = 0; struct mm_list_head_t *new_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(mm_region); if(mm_list_head && mm_list_head->order == mm_region->order) @@ -954,29 +1018,39 @@ static int insert_mm_region(int region_idx, struct mm_list_t* mm_region, int mer //TODO: delete this function void print_buddy_system() { - //spinlock_lock(&pmp_bitmap_lock); + // spin_lock(&pmp_bitmap_lock); - struct mm_list_head_t* mm_list_head = mm_regions[0].mm_list_head; - printm("struct mm_list_head_t size is 0x%lx\r\n", sizeof(struct mm_list_head_t)); - printm("struct mm_list_t size is 0x%lx\r\n", sizeof(struct mm_list_t)); - while(mm_list_head) + for (size_t i = 0; i < N_PMP_REGIONS; i++) { - printm("mm_list_head addr is 0x%ln, order is %d\r\n", (long int *)mm_list_head, mm_list_head->order); - printm("mm_list_head prev is 0x%ln, next is 0x%ln, mm_list is 0x%ln\r\n", + if (!mm_regions[i].valid) + { + break; + } + + struct mm_list_head_t* mm_list_head = mm_regions[i].mm_list_head; + // printm("struct mm_list_head_t size is 0x%lx\r\n", sizeof(struct mm_list_head_t)); + // printm("struct mm_list_t size is 0x%lx\r\n", sizeof(struct mm_list_t)); + while(mm_list_head) + { + printm("mm_list_head[%ld] addr is 0x%ln, order is %d\r\n",i, (long int *)mm_list_head, mm_list_head->order); + printm("mm_list_head prev is 0x%ln, next is 0x%ln, mm_list is 0x%ln\r\n", (long int *)mm_list_head->prev_list_head, (long int *)mm_list_head->next_list_head, (long int*)mm_list_head->mm_list); - struct mm_list_t *mm_region = mm_list_head->mm_list; - while(mm_region) - { - printm(" mm_region addr is 0x%ln, order is %d\r\n", (long int *)mm_region, mm_region->order); - printm(" mm_region prev is 0x%ln, next is 0x%ln\r\n", (long int*)mm_region->prev_mm, (long int*)mm_region->next_mm); - mm_region = mm_region->next_mm; + struct mm_list_t *mm_region = mm_list_head->mm_list; + while(mm_region) + { + printm(" mm_region addr is 0x%ln=0x%p, paddr is 0x%p, order is %d\r\n", (long int 
*)mm_region,(long int *)MM_LIST_2_PADDR(mm_region),(long int *)MM_LIST_2_PADDR(mm_region), mm_region->order); + printm(" mm_region prev is 0x%ln, next is 0x%ln\r\n\n", (long int*)mm_region->prev_mm, (long int*)mm_region->next_mm); + mm_region = mm_region->next_mm; + } + mm_list_head = mm_list_head->next_list_head; } - mm_list_head = mm_list_head->next_list_head; } + + printm("************\r\n"); - //spinlock_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); } void* mm_alloc(unsigned long req_size, unsigned long *resp_size) @@ -986,9 +1060,9 @@ void* mm_alloc(unsigned long req_size, unsigned long *resp_size) return ret_addr; //TODO: reduce lock granularity - spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); - //print_buddy_system(); + print_buddy_system(); unsigned long order = ilog2(req_size-1) + 1; for(int region_idx=0; region_idx < N_PMP_REGIONS; ++region_idx) @@ -1018,9 +1092,9 @@ void* mm_alloc(unsigned long req_size, unsigned long *resp_size) break; } - //print_buddy_system(); + // print_buddy_system(); - spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); if(ret_addr && resp_size) { @@ -1047,9 +1121,8 @@ int mm_free(void* req_paddr, unsigned long free_size) mm_region->prev_mm = NULL; mm_region->next_mm = NULL; - spin_lock(&pmp_bitmap_lock); - - //print_buddy_system(); + // spin_lock(&pmp_bitmap_lock); + acquire_big_emem_lock(__func__); for(region_idx=0; region_idx < N_PMP_REGIONS; ++region_idx) { @@ -1099,11 +1172,11 @@ int mm_free(void* req_paddr, unsigned long free_size) printm("mm_free: failed to insert mm(addr 0x%lx, order %ld)\r\n in mm_regions[%d]\r\n", paddr, order, region_idx); } - //printm("after mm_free\r\n"); - //print_buddy_system(); + printm("after mm_free\r\n"); + print_buddy_system(); mm_free_out: - spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return ret_val; } //TODO:Reserved interfaces for calls to reclaim unused memory @@ -1123,8 +1196,7 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) mm_region->prev_mm = NULL; mm_region->next_mm = NULL; - spin_lock(&pmp_bitmap_lock); - + acquire_big_emem_lock(__func__); //print_buddy_system(); for(region_idx=0; region_idx < N_PMP_REGIONS; ++region_idx) @@ -1136,7 +1208,7 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) } if(region_idx >= N_PMP_REGIONS) { - printm("mm_free: buddy system doesn't contain memory(addr 0x%lx, order %ld)\r\n", paddr, order); + printm("mm_free_clear: buddy system doesn't contain memory(addr 0x%lx, order %ld)\r\n", paddr, order); ret_val = -1; goto mm_free_out; } @@ -1152,7 +1224,7 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) unsigned long region_size = 1 << mm_region->order; if(region_overlap(paddr, size, region_paddr, region_size)) { - printm("mm_free: memory(addr 0x%lx order %ld) overlap with free memory(addr 0x%lx order %d)\r\n", paddr, order, region_paddr, mm_region->order); + printm("mm_free_clear: memory(addr 0x%lx order %ld) overlap with free memory(addr 0x%lx order %d)\r\n", paddr, order, region_paddr, mm_region->order); ret_val = -1; break; } @@ -1163,16 +1235,16 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) mm_list_head = mm_list_head->next_list_head; } - if(mm_list_head) - { - goto mm_free_out; - } + // if(mm_list_head) + // { + // goto mm_free_out; + // } //insert with merge ret_val = insert_mm_region(region_idx, mm_region, 1); if(ret_val < 0) { - printm("mm_free: failed to insert mm(addr 0x%lx, order %ld)\r\n in mm_regions[%d]\r\n", paddr, order, region_idx); + 
printm("mm_free_clear: failed to insert mm(addr 0x%lx, order %ld)\r\n in mm_regions[%d]\r\n", paddr, order, region_idx); } //printm("after mm_free\r\n"); @@ -1187,13 +1259,13 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) struct mm_list_t *mm_list = mm_list_head->mm_list; if (((long int *)MM_LIST_2_PADDR(mm_list) == (long int *)pmp_config.paddr)&&((pmp_config.size) == (1<order))) { - delete_certain_region(region_idx,&mm_list_head,mm_list); + delete_certain_region(region_idx, &mm_list_head, mm_list); mm_regions[region_idx].valid = 0; mm_regions[region_idx].paddr = 0; + mm_regions[region_idx].size = 0; mm_regions[region_idx].mm_list_head = NULL; - // release_big_emem_lock(__func__); - clear_pmp_and_sync(pmp_idx); - // acquire_big_emem_lock(__func__); + clear_pmp_and_sync(pmp_idx); + pmp_bitmap &= ~(1 << pmp_idx); } printm("***after mm_free***\r\n"); @@ -1201,6 +1273,26 @@ int mm_free_clear(void* req_paddr, unsigned long free_size) dump_pmps(); mm_free_out: - spin_unlock(&pmp_bitmap_lock); + // spin_unlock(&pmp_bitmap_lock); + release_big_emem_lock(__func__); return ret_val; +} + +int memory_reclaim(unsigned long* resp_size) +{ + uintptr_t retval = 0; + + printm("[Penglai Monitor@%s %d] invoked\r\n", __func__, current_hartid()); + print_buddy_system(); + + retval = free_enclave_metadata(); + if(retval != 0) + { + printm_err("M mode: sm_memory_reclaim: free_enclave_metadata error\r\n"); + dump_pmps(); + return ENCLAVE_ERROR; + } + print_buddy_system(); + dump_pmps(); + return ENCLAVE_SUCCESS; } \ No newline at end of file diff --git a/opensbi-1.2/lib/sbi/sm/platform/pmp/platform.c b/opensbi-1.2/lib/sbi/sm/platform/pmp/platform.c index beccebff4..2d9a1eeb0 100644 --- a/opensbi-1.2/lib/sbi/sm/platform/pmp/platform.c +++ b/opensbi-1.2/lib/sbi/sm/platform/pmp/platform.c @@ -16,7 +16,7 @@ int platform_init() printm("[Penglai Monitor@%s] init platfrom and prepare PMP\n", __func__); //config the PMP 0 to protect security monitor pmp_config.paddr = (uintptr_t)SM_BASE; - pmp_config.size = (unsigned long)SM_SIZE; + pmp_config.size = (unsigned long)SM_SIZE;//0x80024588 pmp_config.mode = PMP_A_NAPOT; pmp_config.perm = PMP_NO_PERM; set_pmp_and_sync(0, pmp_config); diff --git a/opensbi-1.2/lib/sbi/sm/platform/spmp/platform.c b/opensbi-1.2/lib/sbi/sm/platform/spmp/platform.c index 2e50e7b67..285a775ac 100644 --- a/opensbi-1.2/lib/sbi/sm/platform/spmp/platform.c +++ b/opensbi-1.2/lib/sbi/sm/platform/spmp/platform.c @@ -21,7 +21,7 @@ int platform_init() pmp_config.perm = PMP_R | PMP_W | PMP_X; set_pmp(NPMP-1, pmp_config); - //config the last PMP to protect security monitor + //config the last-1 PMP to protect security monitor pmp_config.paddr = (uintptr_t)SM_BASE; pmp_config.size = (unsigned long)SM_SIZE; pmp_config.mode = PMP_A_NAPOT; diff --git a/opensbi-1.2/lib/sbi/sm/pmp.c b/opensbi-1.2/lib/sbi/sm/pmp.c index b39722081..497724827 100644 --- a/opensbi-1.2/lib/sbi/sm/pmp.c +++ b/opensbi-1.2/lib/sbi/sm/pmp.c @@ -250,6 +250,7 @@ void clear_pmp(int pmp_idx) pmp_cfg_t.paddr = 0; pmp_cfg_t.size = 0; set_pmp(pmp_idx, pmp_cfg_t); + // set_pmp_and_sync(pmp_idx, pmp_cfg_t); return; } @@ -314,7 +315,7 @@ void dump_pmps(void) for (i=0; i<16; i++){ struct pmp_config_t pmp = get_pmp(i); (void)pmp; //to ignore the unused variable warnings - printm("[Debug:SM@%s] pmp_%d: mode(0x%lx) perm(0x%lx) paddr(0x%lx) size(0x%lx)\n", - __func__, i, pmp.mode, pmp.perm, pmp.paddr, pmp.size); + printm("[Debug:SM@%s %u] pmp_%d: mode(0x%lx) perm(0x%lx) paddr(0x%lx) size(0x%lx)\n", + __func__, current_hartid(),i, pmp.mode, 
pmp.perm, pmp.paddr, pmp.size); } } diff --git a/opensbi-1.2/lib/sbi/sm/sm.c b/opensbi-1.2/lib/sbi/sm/sm.c index 781743f9b..49a822911 100644 --- a/opensbi-1.2/lib/sbi/sm/sm.c +++ b/opensbi-1.2/lib/sbi/sm/sm.c @@ -6,9 +6,31 @@ #include #include #include +#include + +extern volatile int print_m_mode; //static int sm_initialized = 0; //static spinlock_t sm_init_lock = SPINLOCK_INIT; +static spinlock_t sm_alloc_enclave_mem_lock = SPIN_LOCK_INITIALIZER; +void acquire_big_sm_lock(const char *str) +{ + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s try lock\n", __func__, + current_hartid(), str); + spin_lock(&sm_alloc_enclave_mem_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s get lock\n", __func__, + current_hartid(), str); +} + +void release_big_sm_lock(const char *str) +{ + spin_unlock(&sm_alloc_enclave_mem_lock); + if (LOCK_DEBUG) + printm("[PENGLAI SM@%s_%d] %s release lock\n", __func__, + current_hartid(), str); +} void sm_init() { @@ -38,11 +60,10 @@ uintptr_t sm_mm_init(uintptr_t paddr, unsigned long size) uintptr_t sm_mm_extend(uintptr_t paddr, unsigned long size) { uintptr_t retval = 0; - printm("[Penglai Monitor] %s invoked\r\n", __func__); - - retval = mm_init(paddr, size); - - printm("[Penglai Monitor] %s return:%ld\r\n", __func__, retval); + printm("[Penglai Monitor %d] %s invoked\r\n", current_hartid(), __func__); + print_m_mode = 1; + retval = mm_init(paddr, size); + printm("[Penglai Monitor %d] %s return:%ld\r\n", current_hartid(), __func__, retval); return retval; } @@ -75,8 +96,6 @@ uintptr_t sm_alloc_enclave_mem(uintptr_t mm_alloc_arg) printm("M mode: sm_alloc_enclave_mem: no enough memory\r\n"); return ENCLAVE_NO_MEMORY; } - dump_pmps(); - //grant kernel access to this memory if (grant_kernel_access(paddr, resp_size) != 0) { printm_err( @@ -103,7 +122,41 @@ uintptr_t sm_alloc_enclave_mem(uintptr_t mm_alloc_arg) return ENCLAVE_SUCCESS; } -uintptr_t sm_create_enclave(uintptr_t enclave_sbi_param) +uintptr_t sm_memory_reclaim(uintptr_t mm_reclaim_arg, unsigned long eid) +{ + uintptr_t retval = 0; + unsigned long resp_size = 0; + printm("[Penglai Monitor] %s invoked\r\n", __func__); + struct mm_reclaim_arg_t mm_reclaim_arg_local; + + retval = memory_reclaim(&resp_size); + if (retval == RETRY_SPIN_LOCK) { + return retval; + } + + retval = copy_from_host(&mm_reclaim_arg_local, + (struct mm_reclaim_arg_t *)mm_reclaim_arg, + sizeof(struct mm_reclaim_arg_t)); + if (retval != 0) { + printm_err( + "M mode: sm_memory_reclaim: unknown error happended when copy from host\r\n"); + return ENCLAVE_ERROR; + } + + mm_reclaim_arg_local.resp_size = resp_size; + retval = copy_to_host((struct mm_reclaim_arg_t *)mm_reclaim_arg, + &mm_reclaim_arg_local, + sizeof(struct mm_reclaim_arg_t)); + if (retval != 0) { + printm_err( + "M mode: sm_memory_reclaim: unknown error happended when copy to host\r\n"); + return ENCLAVE_ERROR; + } + printm("[Penglai Monitor] %s return:%ld\r\n", __func__, retval); + return retval; +} + +uintptr_t sm_create_enclave(uintptr_t enclave_sbi_param, bool retry) { struct enclave_sbi_param_t enclave_sbi_param_local; uintptr_t retval = 0; @@ -113,6 +166,11 @@ uintptr_t sm_create_enclave(uintptr_t enclave_sbi_param) retval = copy_from_host(&enclave_sbi_param_local, (struct enclave_sbi_param_t *)enclave_sbi_param, sizeof(struct enclave_sbi_param_t)); + if (retval == RETRY_SPIN_LOCK) + { + return retval; + } + if (retval != 0) { printm_err( "M mode: sm_create_enclave: unknown error happended when copy from host\r\n"); @@ -121,14 +179,14 @@ uintptr_t 
sm_create_enclave(uintptr_t enclave_sbi_param) void *paddr = (void *)enclave_sbi_param_local.paddr; unsigned long size = (unsigned long)enclave_sbi_param_local.size; - if (retrieve_kernel_access(paddr, size) != + if (!retry && retrieve_kernel_access(paddr, size) != 0) //we always allow kernel access the memory now { mm_free(paddr, size); return -1UL; } - retval = create_enclave(enclave_sbi_param_local); + retval = create_enclave(enclave_sbi_param_local, retry); printm("[Penglai Monitor] %s created return value:%ld \r\n", __func__, retval); @@ -195,12 +253,11 @@ uintptr_t sm_resume_enclave(uintptr_t *regs, unsigned long eid) uintptr_t sm_exit_enclave(uintptr_t *regs, unsigned long retval) { uintptr_t ret; - printm("[Penglai Monitor] %s invoked\r\n", __func__); + printm("[Penglai Monitor %d] %s invoked\r\n", current_hartid(), __func__); ret = exit_enclave(regs, retval); - printm("[Penglai Monitor] %s return: %ld\r\n", __func__, ret); - + printm("[Penglai Monitor %d] %s return: %ld\r\n", current_hartid(), __func__, ret); return ret; } @@ -273,7 +330,12 @@ uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc) regs[11] = ret; //value return ret; } - +/** + * \brief Used to clear pmp settings when uninstalling kernel modules + * + * \param size_ptr Used to pass the size of the freed memory to the driver + * \param flag Select whether to clear a specific pmp +*/ uintptr_t sm_free_enclave_mem(uintptr_t size_ptr, unsigned long flag) { uintptr_t ret = 0; @@ -281,6 +343,8 @@ uintptr_t sm_free_enclave_mem(uintptr_t size_ptr, unsigned long flag) dump_pmps(); switch (flag) { case FREE_MAX_MEMORY: + free_enclave_metadata(); + for (size_t i = NPMP - 2; i >= 0; i--) { int pmp_idx = i; struct pmp_config_t pmp_config = get_pmp(pmp_idx); @@ -290,14 +354,13 @@ uintptr_t sm_free_enclave_mem(uintptr_t size_ptr, unsigned long flag) } if (pmp_idx == 0) { - printm("M mode: sm_memory_reclaim: There is no mem to reclaim\r\n"); + sbi_printf("M mode:Finish free and there is no mem to reclaim\r\n"); dump_pmps(); size = 0; ret = 0; break; } - - clear_pmp_and_sync(pmp_idx); + mm_free_clear((void *)pmp_config.paddr, pmp_config.size); ret = pmp_config.paddr; size = pmp_config.size;
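Editor's note on the retry path introduced by this patch: several monitor entry points (copy_from_host(), mm_init(), sm_create_enclave(), sm_memory_reclaim()) now use try_big_emem_lock()/spin_trylock() and return RETRY_SPIN_LOCK (-3) when the lock cannot be taken, with RETRY_TIMES (5) bounding repeated attempts. The kernel-driver side is not part of this diff, so the following is only a hypothetical caller-side sketch of how such a return code might be retried; penglai_sbi_create_enclave() and its retry flag are stand-ins, not the real driver interface.

/*
 * Hypothetical caller-side sketch: retrying an SBI call that returns
 * RETRY_SPIN_LOCK (-3) while the monitor cannot take its lock.
 * penglai_sbi_create_enclave() is a stand-in, not the real driver function.
 */
#include <stdio.h>

#define RETRY_SPIN_LOCK (-3)
#define RETRY_TIMES     5

/* stand-in for the real SBI ecall; pretends the lock is busy twice */
static long penglai_sbi_create_enclave(unsigned long arg, int retry_flag)
{
    static int busy = 2;
    (void)arg;
    (void)retry_flag;
    return busy-- > 0 ? RETRY_SPIN_LOCK : 0;
}

static long create_enclave_with_retry(unsigned long arg)
{
    long ret  = penglai_sbi_create_enclave(arg, 0);
    int  left = RETRY_TIMES;

    /* later attempts pass retry=1 so the monitor can skip or undo work
     * already done on the first attempt (as create_enclave() does above) */
    while (ret == RETRY_SPIN_LOCK && left--)
        ret = penglai_sbi_create_enclave(arg, 1);

    return ret;
}

int main(void)
{
    printf("create returned %ld\n", create_enclave_with_retry(0));
    return 0;
}

In the patched ecall dispatcher this flag corresponds to regs->a1, which SBI_CREATE_ENCLAVE now forwards as sm_create_enclave(regs->a0, regs->a1).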