diff --git a/src/jit/Backend.cpp b/src/jit/Backend.cpp
index 0465b1c3b..75321ec7c 100644
--- a/src/jit/Backend.cpp
+++ b/src/jit/Backend.cpp
@@ -223,8 +223,7 @@ CompileContext::CompileContext(Module* module, JITCompiler* compiler)
 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
     , shuffleOffset(0)
 #endif /* SLJIT_CONFIG_X86 */
-    , stackTmpStart(0)
-    , stackMemoryStart(sizeof(sljit_sw))
+    , stackTmpStart(sizeof(sljit_sw))
     , nextTryBlock(0)
     , currentTryBlock(InstanceConstData::globalTryBlock)
     , trapBlocksStart(0)
@@ -233,7 +232,9 @@ CompileContext::CompileContext(Module* module, JITCompiler* compiler)
 {
     // Compiler is not initialized yet.
     size_t offset = Instance::alignedSize();
-    globalsStart = offset + sizeof(void*) * module->numberOfMemoryTypes();
+    size_t numberOfMemoryTypes = module->numberOfMemoryTypes();
+    targetBuffersStart = offset + numberOfMemoryTypes * sizeof(void*);
+    globalsStart = targetBuffersStart + Memory::TargetBuffer::sizeInPointers(numberOfMemoryTypes) * sizeof(void*);
 
     tableStart = globalsStart + module->numberOfGlobalTypes() * sizeof(void*);
     functionsStart = tableStart + module->numberOfTableTypes() * sizeof(void*);
@@ -1020,7 +1021,6 @@ JITCompiler::JITCompiler(Module* module, uint32_t JITFlags)
     , m_savedIntegerRegCount(0)
     , m_savedFloatRegCount(0)
     , m_stackTmpSize(0)
-    , m_useMemory0(false)
 {
     if (module->m_jitModule != nullptr) {
         ASSERT(module->m_jitModule->m_instanceConstData != nullptr);
@@ -1038,10 +1038,6 @@ void JITCompiler::compileFunction(JITFunction* jitFunc, bool isExternal)
 
     m_functionList.push_back(FunctionList(jitFunc, isExternal, m_branchTableSize));
 
-    sljit_uw stackTmpStart = m_context.stackMemoryStart + (m_useMemory0 ? sizeof(Memory::TargetBuffer) : 0);
-    // Align data.
-    m_context.stackTmpStart = static_cast<sljit_sw>((stackTmpStart + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1));
-
     if (m_compiler == nullptr) {
         // First compiled function.
         m_compiler = sljit_create_compiler(nullptr);
@@ -1482,7 +1478,6 @@ void JITCompiler::clear()
     m_last = nullptr;
     m_branchTableSize = 0;
     m_stackTmpSize = 0;
-    m_useMemory0 = false;
 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
     m_context.shuffleOffset = 0;
 #endif /* SLJIT_CONFIG_X86 */
@@ -1530,24 +1525,6 @@ void JITCompiler::emitProlog()
                      (m_savedIntegerRegCount + 2) | SLJIT_ENTER_FLOAT(m_savedFloatRegCount), m_context.stackTmpStart + m_stackTmpSize);
     sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), kContextOffset, SLJIT_R0, 0);
 
-    if (hasMemory0()) {
-        sljit_sw stackMemoryStart = m_context.stackMemoryStart;
-        ASSERT(m_context.stackTmpStart >= stackMemoryStart + static_cast<sljit_sw>(sizeof(Memory::TargetBuffer)));
-
-        sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(kInstanceReg), Instance::alignedSize());
-
-        sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_R1, 0, SLJIT_MEM1(SLJIT_R0), offsetof(Memory, m_targetBuffers));
-        sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), offsetof(Memory, m_sizeInByte) + WORD_LOW_OFFSET);
-        sljit_get_local_base(m_compiler, SLJIT_MEM1(SLJIT_R0), offsetof(Memory, m_targetBuffers), stackMemoryStart);
-        sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), offsetof(Memory, m_buffer));
-
-#if (defined SLJIT_32BIT_ARCHITECTURE && SLJIT_32BIT_ARCHITECTURE)
-        sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), stackMemoryStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_HIGH_OFFSET, SLJIT_IMM, 0);
-#endif /* SLJIT_32BIT_ARCHITECTURE */
-        sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_MEM1(SLJIT_SP), stackMemoryStart + offsetof(Memory::TargetBuffer, prev), SLJIT_R1, 0);
-        sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), stackMemoryStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET, SLJIT_R2, 0);
-        sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_MEM1(SLJIT_SP), stackMemoryStart + offsetof(Memory::TargetBuffer, buffer), SLJIT_R0, 0);
-    }
 
     m_context.branchTableOffset = 0;
     size_t size = func.branchTableSize * sizeof(sljit_up);
@@ -1568,19 +1545,6 @@
     }
 }
 
-void JITCompiler::emitRestoreMemories()
-{
-    if (!hasMemory0()) {
-        return;
-    }
-
-    sljit_sw stackMemoryStart = m_context.stackMemoryStart;
-
-    sljit_emit_op1(m_compiler, SLJIT_MOV, SLJIT_R1, 0, SLJIT_MEM1(kInstanceReg), Instance::alignedSize());
-    sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), stackMemoryStart + offsetof(Memory::TargetBuffer, prev));
-    sljit_emit_op1(m_compiler, SLJIT_MOV_P, SLJIT_MEM1(SLJIT_R1), offsetof(Memory, m_targetBuffers), SLJIT_R2, 0);
-}
-
 void JITCompiler::emitEpilog()
 {
     FunctionList& func = m_functionList.back();
@@ -1598,7 +1562,6 @@ void JITCompiler::emitEpilog()
         m_context.earlyReturns.clear();
     }
 
-    emitRestoreMemories();
     sljit_emit_return(m_compiler, SLJIT_MOV_P, SLJIT_R0, 0);
 
     m_context.emitSlowCases(m_compiler);
@@ -1661,7 +1624,6 @@ void JITCompiler::emitEpilog()
         sljit_emit_op_dst(m_compiler, SLJIT_GET_RETURN_ADDRESS, SLJIT_R1, 0);
         sljit_emit_icall(m_compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W), SLJIT_IMM, GET_FUNC_ADDR(sljit_sw, getTrapHandler));
 
-        emitRestoreMemories();
         sljit_emit_return_to(m_compiler, SLJIT_R0, 0);
 
         while (trapJumpIndex < trapJumps.size()) {
diff --git a/src/jit/ByteCodeParser.cpp b/src/jit/ByteCodeParser.cpp
index d421218b3..cbe68369f 100644
--- a/src/jit/ByteCodeParser.cpp
+++ b/src/jit/ByteCodeParser.cpp
@@ -1022,14 +1022,12 @@ static void compileFunction(JITCompiler* compiler)
         case ByteCode::Load32Opcode: {
            group = Instruction::Load;
            paramType = ParamTypes::ParamSrcDst;
-            compiler->useMemory0();
            requiredInit = OTLoadI32;
            break;
        }
        case ByteCode::Load64Opcode: {
            group = Instruction::Load;
            paramType = ParamTypes::ParamSrcDst;
-            compiler->useMemory0();
            requiredInit = OTLoadI64;
            break;
        }
@@ -1049,7 +1047,6 @@ static void compileFunction(JITCompiler* compiler)
        case ByteCode::I64Load32UOpcode: {
            group = Instruction::Load;
            paramType = ParamTypes::ParamSrcDstValue;
-            compiler->useMemory0();
            if (requiredInit == OTNone) {
                requiredInit = OTLoadI64;
            }
@@ -1072,7 +1069,6 @@ static void compileFunction(JITCompiler* compiler)
        case ByteCode::V128Load64ZeroOpcode: {
            group = Instruction::Load;
            paramType = ParamTypes::ParamSrcDstValue;
-            compiler->useMemory0();
 
            if (opcode == ByteCode::F32LoadOpcode)
                requiredInit = OTLoadF32;
@@ -1089,7 +1085,6 @@ static void compileFunction(JITCompiler* compiler)
            SIMDMemoryLoad* loadOperation = reinterpret_cast<SIMDMemoryLoad*>(byteCode);
            Instruction* instr = compiler->append(byteCode, Instruction::LoadLaneSIMD, opcode, 2, 1);
            instr->setRequiredRegsDescriptor(OTLoadLaneV128);
-            compiler->useMemory0();
 
            Operand* operands = instr->operands();
            operands[0] = STACK_OFFSET(loadOperation->src0Offset());
@@ -1100,14 +1095,12 @@ static void compileFunction(JITCompiler* compiler)
        case ByteCode::Store32Opcode: {
            group = Instruction::Store;
            paramType = ParamTypes::ParamSrc2;
-            compiler->useMemory0();
            requiredInit = OTStoreI32;
            break;
        }
        case ByteCode::Store64Opcode: {
            group = Instruction::Store;
            paramType = ParamTypes::ParamSrc2;
-            compiler->useMemory0();
            requiredInit = OTStoreI64;
            break;
        }
@@ -1127,7 +1120,6 @@ static void compileFunction(JITCompiler* compiler)
        case ByteCode::I64StoreOpcode: {
            group = Instruction::Store;
            paramType = ParamTypes::ParamSrc2Value;
-            compiler->useMemory0();
            if (requiredInit == OTNone) {
                requiredInit = OTStoreI64;
            }
@@ -1138,7 +1130,6 @@ static void compileFunction(JITCompiler* compiler)
        case ByteCode::V128StoreOpcode: {
            group = Instruction::Store;
            paramType = ParamTypes::ParamSrc2Value;
-            compiler->useMemory0();
 
            if (opcode == ByteCode::F32StoreOpcode)
                requiredInit = OTStoreF32;
@@ -1155,7 +1146,6 @@ static void compileFunction(JITCompiler* compiler)
            SIMDMemoryStore* storeOperation = reinterpret_cast<SIMDMemoryStore*>(byteCode);
            Instruction* instr = compiler->append(byteCode, Instruction::Store, opcode, 2, 0);
            instr->setRequiredRegsDescriptor(OTStoreV128);
-            compiler->useMemory0();
 
            Operand* operands = instr->operands();
            operands[0] = STACK_OFFSET(storeOperation->src0Offset());
@@ -1330,7 +1320,6 @@ static void compileFunction(JITCompiler* compiler)
 
            Instruction* instr = compiler->append(byteCode, Instruction::Memory, opcode, 0, 1);
            instr->setRequiredRegsDescriptor(OTPutI32);
-            compiler->useMemory0();
 
            *instr->operands() = STACK_OFFSET(memorySize->dstOffset());
            break;
@@ -1874,7 +1863,6 @@ static void compileFunction(JITCompiler* compiler)
                compiler->increaseStackTmpSize(8);
            }
 #endif /* SLJIT_32BIT_ARCHITECTURE */
-            compiler->useMemory0();
            if (requiredInit == OTNone) {
                requiredInit = OTLoadI64;
            }
@@ -1899,7 +1887,6 @@ static void compileFunction(JITCompiler* compiler)
                compiler->increaseStackTmpSize(8);
            }
 #endif /* SLJIT_32BIT_ARCHITECTURE */
-            compiler->useMemory0();
            if (requiredInit == OTNone) {
                requiredInit = OTStoreI64;
            }
@@ -1965,7 +1952,6 @@ static void compileFunction(JITCompiler* compiler)
            AtomicRmw* atomicRmw = reinterpret_cast<AtomicRmw*>(byteCode);
            Operand* operands = instr->operands();
            instr->setRequiredRegsDescriptor(requiredInit != OTNone ? requiredInit : OTAtomicRmwI64);
-            compiler->useMemory0();
 
            operands[0] = STACK_OFFSET(atomicRmw->src0Offset());
            operands[1] = STACK_OFFSET(atomicRmw->src1Offset());
@@ -1997,7 +1983,6 @@ static void compileFunction(JITCompiler* compiler)
            AtomicRmwCmpxchg* atomicRmwCmpxchg = reinterpret_cast<AtomicRmwCmpxchg*>(byteCode);
            Operand* operands = instr->operands();
            instr->setRequiredRegsDescriptor(requiredInit != OTNone ? requiredInit : OTAtomicRmwCmpxchgI64);
-            compiler->useMemory0();
 
            operands[0] = STACK_OFFSET(atomicRmwCmpxchg->src0Offset());
            operands[1] = STACK_OFFSET(atomicRmwCmpxchg->src1Offset());
@@ -2017,7 +2002,6 @@ static void compileFunction(JITCompiler* compiler)
            Operand* operands = instr->operands();
            instr->setRequiredRegsDescriptor(requiredInit != OTNone ? requiredInit : OTAtomicWaitI32);
            compiler->increaseStackTmpSize(16);
-            compiler->useMemory0();
 
            operands[0] = STACK_OFFSET(memoryAtomicWait->src0Offset());
            operands[1] = STACK_OFFSET(memoryAtomicWait->src1Offset());
@@ -2032,7 +2016,6 @@ static void compileFunction(JITCompiler* compiler)
            MemoryAtomicNotify* memoryAtomicNotify = reinterpret_cast<MemoryAtomicNotify*>(byteCode);
            Operand* operands = instr->operands();
            instr->setRequiredRegsDescriptor(OTAtomicNotify);
-            compiler->useMemory0();
 
            operands[0] = STACK_OFFSET(memoryAtomicNotify->src0Offset());
            operands[1] = STACK_OFFSET(memoryAtomicNotify->src1Offset());
diff --git a/src/jit/Compiler.h b/src/jit/Compiler.h
index 8c30aa602..ffa7d8f80 100644
--- a/src/jit/Compiler.h
+++ b/src/jit/Compiler.h
@@ -585,11 +585,11 @@ struct CompileContext {
 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
     uintptr_t shuffleOffset;
 #endif /* SLJIT_CONFIG_X86 */
+    size_t targetBuffersStart;
     size_t globalsStart;
     size_t tableStart;
     size_t functionsStart;
     sljit_sw stackTmpStart;
-    sljit_sw stackMemoryStart;
     size_t nextTryBlock;
     size_t currentTryBlock;
     size_t trapBlocksStart;
@@ -761,16 +751,6 @@ class JITCompiler {
         }
     }
 
-    void useMemory0()
-    {
-        m_useMemory0 = true;
-    }
-
-    bool hasMemory0()
-    {
-        return m_useMemory0;
-    }
-
     void setModuleFunction(ModuleFunction* moduleFunction)
     {
         m_moduleFunction = moduleFunction;
@@ -817,7 +807,6 @@ class JITCompiler {
     // Backend operations.
    void emitProlog();
    void emitEpilog();
-    void emitRestoreMemories();
 
 #if !defined(NDEBUG)
    static const char* m_byteCodeNames[];
@@ -841,7 +830,6 @@ class JITCompiler {
    uint8_t m_savedIntegerRegCount;
    uint8_t m_savedFloatRegCount;
    uint8_t m_stackTmpSize;
-    bool m_useMemory0;
 
    std::vector<TryBlock> m_tryBlocks;
    std::vector<FunctionList> m_functionList;
diff --git a/src/jit/MemoryInl.h b/src/jit/MemoryInl.h
index e069b5e28..b5f2e6e37 100644
--- a/src/jit/MemoryInl.h
+++ b/src/jit/MemoryInl.h
@@ -57,9 +57,8 @@ struct MemAddress {
 void MemAddress::check(sljit_compiler* compiler, Operand* offsetOperand, sljit_uw offset, sljit_u32 size)
 {
     CompileContext* context = CompileContext::get(compiler);
-    sljit_sw stackMemoryStart = context->stackMemoryStart;
+    sljit_sw targetBufferOffset = context->targetBuffersStart;
 
-    ASSERT(context->compiler->hasMemory0());
     ASSERT(!(options & LoadInteger) || baseReg != sourceReg);
     ASSERT(!(options & LoadInteger) || offsetReg != sourceReg);
 #if defined(ENABLE_EXTENDED_FEATURES)
@@ -107,8 +106,8 @@ void MemAddress::check(sljit_compiler* compiler, Operand* offsetOperand, sljit_u
     if (offset + size <= context->initialMemorySize) {
         ASSERT(baseReg != 0);
 
-        sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(SLJIT_SP),
-                       stackMemoryStart + offsetof(Memory::TargetBuffer, buffer));
+        sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(kInstanceReg),
+                       targetBufferOffset + offsetof(Memory::TargetBuffer, buffer));
         memArg.arg = SLJIT_MEM1(baseReg);
         memArg.argw = offset;
         load(compiler);
@@ -124,12 +123,12 @@ void MemAddress::check(sljit_compiler* compiler, Operand* offsetOperand, sljit_u
     ASSERT(baseReg != 0 && offsetReg != 0);
 
     /* The sizeInByte is always a 32 bit number on 32 bit systems. */
-    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_TMP_DEST_REG, 0, SLJIT_MEM1(SLJIT_SP),
-                   stackMemoryStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET);
+    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_TMP_DEST_REG, 0, SLJIT_MEM1(kInstanceReg),
+                   targetBufferOffset + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET);
 
     sljit_emit_op1(compiler, SLJIT_MOV, offsetReg, 0, SLJIT_IMM, static_cast<sljit_sw>(offset + size));
-    sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(SLJIT_SP),
-                   stackMemoryStart + offsetof(Memory::TargetBuffer, buffer));
+    sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(kInstanceReg),
+                   targetBufferOffset + offsetof(Memory::TargetBuffer, buffer));
 
     load(compiler);
 
@@ -162,13 +161,13 @@ void MemAddress::check(sljit_compiler* compiler, Operand* offsetOperand, sljit_u
 
     if (context->initialMemorySize != context->maximumMemorySize) {
         /* The sizeInByte is always a 32 bit number on 32 bit systems. */
-        sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_TMP_DEST_REG, 0, SLJIT_MEM1(SLJIT_SP),
-                       stackMemoryStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET);
+        sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_TMP_DEST_REG, 0, SLJIT_MEM1(kInstanceReg),
+                       targetBufferOffset + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET);
         offset += size;
     }
 
-    sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(SLJIT_SP),
-                   stackMemoryStart + offsetof(Memory::TargetBuffer, buffer));
+    sljit_emit_op1(compiler, SLJIT_MOV_P, baseReg, 0, SLJIT_MEM1(kInstanceReg),
+                   targetBufferOffset + offsetof(Memory::TargetBuffer, buffer));
 
     load(compiler);
 
diff --git a/src/jit/MemoryUtilInl.h b/src/jit/MemoryUtilInl.h
index 483121f39..4b5b5eb2b 100644
--- a/src/jit/MemoryUtilInl.h
+++ b/src/jit/MemoryUtilInl.h
@@ -79,13 +79,12 @@ static void emitMemory(sljit_compiler* compiler, Instruction* instr)
    switch (opcode) {
    case ByteCode::MemorySizeOpcode: {
        ASSERT(!(instr->info() & Instruction::kIsCallback));
-        ASSERT(context->compiler->hasMemory0());
 
        JITArg dstArg(params);
 
        /* The sizeInByte is always a 32 bit number on 32 bit systems. */
-        sljit_emit_op2(compiler, SLJIT_LSHR, dstArg.arg, dstArg.argw, SLJIT_MEM1(SLJIT_SP),
-                       context->stackMemoryStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET, SLJIT_IMM, 16);
+        sljit_emit_op2(compiler, SLJIT_LSHR, dstArg.arg, dstArg.argw, SLJIT_MEM1(kInstanceReg),
+                       context->targetBuffersStart + offsetof(Memory::TargetBuffer, sizeInByte) + WORD_LOW_OFFSET, SLJIT_IMM, 16);
        return;
    }
    case ByteCode::MemoryInitOpcode:
diff --git a/src/runtime/Instance.cpp b/src/runtime/Instance.cpp
index 44212845a..d9d7965ac 100644
--- a/src/runtime/Instance.cpp
+++ b/src/runtime/Instance.cpp
@@ -30,8 +30,11 @@ namespace Walrus {
 Instance* Instance::newInstance(Module* module)
 {
     // Must follow the order in Module::instantiate.
-    size_t numberOfRefs = module->numberOfMemoryTypes() + module->numberOfGlobalTypes()
-        + module->numberOfTableTypes() + module->numberOfFunctions() + module->numberOfTagTypes();
+
+    size_t numberOfRefs = module->numberOfMemoryTypes()
+        + Memory::TargetBuffer::sizeInPointers(module->numberOfMemoryTypes())
+        + module->numberOfGlobalTypes() + module->numberOfTableTypes()
+        + module->numberOfFunctions() + module->numberOfTagTypes();
 
     void* result = malloc(alignedSize() + numberOfRefs * sizeof(void*));
 
@@ -60,6 +63,16 @@ Instance::Instance(Module* module)
     module->store()->appendInstance(this);
 }
 
+Instance::~Instance()
+{
+    size_t size = m_module->numberOfMemoryTypes();
+    Memory::TargetBuffer* targetBuffers = reinterpret_cast<Memory::TargetBuffer*>(alignedEnd() + m_module->numberOfMemoryTypes());
+
+    for (size_t i = 0; i < size; i++) {
+        targetBuffers[i].deque(m_memories[i]);
+    }
+}
+
 Optional<ExportType*> Instance::resolveExportType(std::string& name)
 {
     for (auto me : m_module->exports()) {
diff --git a/src/runtime/Instance.h b/src/runtime/Instance.h
index e734b1c5f..c1de77416 100644
--- a/src/runtime/Instance.h
+++ b/src/runtime/Instance.h
@@ -100,6 +100,11 @@ class Instance : public Object {
         return (sizeof(Instance) + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
     }
 
+    void** alignedEnd()
+    {
+        return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(this) + Instance::alignedSize());
+    }
+
     Module* module() const { return m_module; }
 
     Function* function(uint32_t index) const { return m_functions[index]; }
@@ -128,7 +133,7 @@ class Instance : public Object {
 private:
     Instance(Module* module);
 
-    ~Instance() {}
+    ~Instance();
 
     Module* m_module;
 
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
index 19343adfd..3d651ec02 100644
--- a/src/runtime/Memory.cpp
+++ b/src/runtime/Memory.cpp
@@ -123,7 +123,7 @@ bool Memory::grow(uint64_t growSizeInByte)
         while (targetBuffer != nullptr) {
             targetBuffer->sizeInByte = sizeInByte();
             targetBuffer->buffer = buffer();
-            targetBuffer = targetBuffer->prev;
+            targetBuffer = targetBuffer->next;
         }
         return true;
     } else if (newSizeInByte == m_sizeInByte) {
@@ -233,6 +233,41 @@ void Memory::fillMemory(uint32_t start, uint8_t value, uint32_t size)
 #endif
 }
 
+void Memory::TargetBuffer::enque(Memory* memory)
+{
+    next = memory->m_targetBuffers;
+    buffer = memory->buffer();
+    sizeInByte = memory->sizeInByte();
+
+    memory->m_targetBuffers = this;
+}
+
+void Memory::TargetBuffer::deque(Memory* memory)
+{
+    // Cache is not initialized.
+    if (sizeInByte == ~(uint64_t)0) {
+        return;
+    }
+
+    TargetBuffer* current = memory->m_targetBuffers;
+
+    if (current == this) {
+        memory->m_targetBuffers = next;
+        return;
+    }
+
+    while (true) {
+        ASSERT(current != nullptr && current->next != nullptr);
+
+        if (current->next == this) {
+            current->next = next;
+            return;
+        }
+
+        current = current->next;
+    }
+}
+
 #if defined(ENABLE_EXTENDED_FEATURES)
 void Memory::checkAtomicAccess(ExecutionState& state, uint32_t offset, uint32_t size, uint32_t addend) const
 {
diff --git a/src/runtime/Memory.h b/src/runtime/Memory.h
index 30c89eb28..397bc47db 100644
--- a/src/runtime/Memory.h
+++ b/src/runtime/Memory.h
@@ -39,13 +39,24 @@ class Memory : public Extern {
     // Caching memory target for fast access.
     struct TargetBuffer {
         TargetBuffer()
-            : prev(nullptr)
-            , buffer(nullptr)
-            , sizeInByte(0)
         {
+            setUninitialized();
         }
 
-        TargetBuffer* prev;
+        static size_t sizeInPointers(size_t memoryCount)
+        {
+            return ((memoryCount * sizeof(Memory::TargetBuffer)) + (sizeof(void*) - 1)) / sizeof(void*);
+        }
+
+        void setUninitialized()
+        {
+            sizeInByte = ~(uint64_t)0;
+        }
+
+        void enque(Memory* memory);
+        void deque(Memory* memory);
+
+        TargetBuffer* next;
         uint8_t* buffer;
         uint64_t sizeInByte;
     };
diff --git a/src/runtime/Module.cpp b/src/runtime/Module.cpp
index 3d6c98c61..f7a1eb0c2 100644
--- a/src/runtime/Module.cpp
+++ b/src/runtime/Module.cpp
@@ -118,11 +118,13 @@ Instance* Module::instantiate(ExecutionState& state, const ExternVector& imports
 {
     Instance* instance = Instance::newInstance(this);
 
-    void** references = reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(instance) + Instance::alignedSize());
+    void** references = instance->alignedEnd();
 
     // Must follow the order in Instance::newInstance.
     instance->m_memories = reinterpret_cast<Memory**>(references);
     references += numberOfMemoryTypes();
+    Memory::TargetBuffer* targetBuffers = reinterpret_cast<Memory::TargetBuffer*>(references);
+    references += Memory::TargetBuffer::sizeInPointers(numberOfMemoryTypes());
     instance->m_globals = reinterpret_cast<Global**>(references);
     references += numberOfGlobalTypes();
     instance->m_tables = reinterpret_cast<Table**>(references);
@@ -137,6 +139,10 @@ Instance* Module::instantiate(ExecutionState& state, const ExternVector& imports
     size_t memIndex = 0;
     size_t tagIndex = 0;
 
+    for (size_t i = 0; i < numberOfMemoryTypes(); i++) {
+        targetBuffers[i].setUninitialized();
+    }
+
     if (imports.size() < m_imports.size()) {
         Trap::throwException(state, "Insufficient import");
     }
@@ -227,6 +233,11 @@ Instance* Module::instantiate(ExecutionState& state, const ExternVector& imports
         tagIndex++;
     }
 
+    // All memories are resolved, enque them.
+    for (size_t i = 0; i < numberOfMemoryTypes(); i++) {
+        targetBuffers[i].enque(instance->m_memories[i]);
+    }
+
     // init global
     while (globIndex < m_globalTypes.size()) {
         GlobalType* globalType = m_globalTypes[globIndex];
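Reviewer note, not part of the patch: the sketch below is a minimal standalone model of the bookkeeping this change introduces. Each Instance now owns one Memory::TargetBuffer slot per memory, linked into the owning Memory as an intrusive singly-linked list (enque at instantiation, deque in ~Instance, and Memory::grow() walks next to refresh every cached buffer/size). The FakeMemory/FakeTargetBuffer names are illustrative stand-ins, not the real Walrus types.

// Standalone model (assumed names, not the actual Walrus classes) of the
// intrusive singly-linked list formed by Memory::TargetBuffer, plus the
// pointer-slot rounding done by TargetBuffer::sizeInPointers().
#include <cassert>
#include <cstddef>
#include <cstdint>

struct FakeMemory;

struct FakeTargetBuffer {
    FakeTargetBuffer* next = nullptr;
    uint8_t* buffer = nullptr;
    uint64_t sizeInByte = ~(uint64_t)0; // ~0 marks "uninitialized", as in setUninitialized()

    // Number of void*-sized slots needed to store `count` buffers contiguously
    // after the Instance data (rounded up to a whole pointer).
    static size_t sizeInPointers(size_t count)
    {
        return ((count * sizeof(FakeTargetBuffer)) + (sizeof(void*) - 1)) / sizeof(void*);
    }

    void enque(FakeMemory* memory);
    void deque(FakeMemory* memory);
};

struct FakeMemory {
    FakeTargetBuffer* targetBuffers = nullptr; // head of the list of live caches
    uint8_t* data = nullptr;
    uint64_t size = 0;
};

void FakeTargetBuffer::enque(FakeMemory* memory)
{
    // Push onto the head of the memory's list and snapshot its current state.
    next = memory->targetBuffers;
    buffer = memory->data;
    sizeInByte = memory->size;
    memory->targetBuffers = this;
}

void FakeTargetBuffer::deque(FakeMemory* memory)
{
    if (sizeInByte == ~(uint64_t)0) {
        return; // never enqueued, nothing to unlink
    }
    // Unlink this node; it is expected to appear in the list exactly once.
    FakeTargetBuffer** link = &memory->targetBuffers;
    while (*link != this) {
        link = &(*link)->next;
    }
    *link = next;
}

int main()
{
    FakeMemory mem;
    FakeTargetBuffer a, b;

    a.enque(&mem);
    b.enque(&mem);
    assert(mem.targetBuffers == &b && b.next == &a);

    // A grow() would walk the list through `next` and refresh buffer/sizeInByte
    // of every node, mirroring the loop in Memory::grow() after this patch.
    a.deque(&mem);
    assert(mem.targetBuffers == &b && b.next == nullptr);
    b.deque(&mem);
    assert(mem.targetBuffers == nullptr);

    // The rounded pointer-slot count always covers the raw byte size.
    assert(FakeTargetBuffer::sizeInPointers(2) * sizeof(void*) >= 2 * sizeof(FakeTargetBuffer));
    return 0;
}

Keeping the cached buffers inside the Instance allocation (at targetBuffersStart, right after the memory pointer array) is what allows the JIT to drop the old per-call prolog/epilog bookkeeping (useMemory0/emitRestoreMemories) and address the cache at a fixed offset from kInstanceReg instead of SLJIT_SP.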