diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 1198de8f358fbc..28b0199b9481d3 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -191,6 +191,7 @@ Vladimir Krivosheev Vladimir Shutoff Wenlu Wang Wiktor Garbacz +Wouter Vermeiren Xiaoyin Liu Yannic Bonenberger Yong Wang diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 5c226f4b836af6..554b26006c3489 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -606,8 +606,9 @@ config("toolchain") { } } if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { - defines += [ "V8_TARGET_ARCH_PPC" ] - if (v8_current_cpu == "ppc64") { + if (v8_current_cpu == "ppc") { + defines += [ "V8_TARGET_ARCH_PPC" ] + } else if (v8_current_cpu == "ppc64") { defines += [ "V8_TARGET_ARCH_PPC64" ] } if (host_byteorder == "little") { @@ -1605,11 +1606,16 @@ v8_source_set("v8_initializers") { ### gcmole(arch:mips64el) ### "src/builtins/mips64/builtins-mips64.cc", ] - } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { + } else if (v8_current_cpu == "ppc") { sources += [ ### gcmole(arch:ppc) ### "src/builtins/ppc/builtins-ppc.cc", ] + } else if (v8_current_cpu == "ppc64") { + sources += [ + ### gcmole(arch:ppc64) ### + "src/builtins/ppc/builtins-ppc.cc", + ] } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { sources += [ ### gcmole(arch:s390) ### @@ -3190,7 +3196,7 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/mips64/regexp-macro-assembler-mips64.h", "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", ] - } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { + } else if (v8_current_cpu == "ppc") { sources += [ ### gcmole(arch:ppc) ### "src/codegen/ppc/assembler-ppc-inl.h", "src/codegen/ppc/assembler-ppc.cc", @@ -3206,9 +3212,42 @@ v8_source_set("v8_base_without_compiler") { "src/compiler/backend/ppc/instruction-codes-ppc.h", "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", "src/compiler/backend/ppc/instruction-selector-ppc.cc", + 
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", + "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", + "src/debug/ppc/debug-ppc.cc", + "src/deoptimizer/ppc/deoptimizer-ppc.cc", + "src/diagnostics/ppc/disasm-ppc.cc", + "src/diagnostics/ppc/eh-frame-ppc.cc", + "src/execution/ppc/frame-constants-ppc.cc", + "src/execution/ppc/frame-constants-ppc.h", + "src/execution/ppc/simulator-ppc.cc", + "src/execution/ppc/simulator-ppc.h", + "src/regexp/ppc/regexp-macro-assembler-ppc.cc", + "src/regexp/ppc/regexp-macro-assembler-ppc.h", + "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", + ] + } else if (v8_current_cpu == "ppc64") { + sources += [ ### gcmole(arch:ppc64) ### + "src/codegen/ppc/assembler-ppc-inl.h", + "src/codegen/ppc/assembler-ppc.cc", + "src/codegen/ppc/assembler-ppc.h", + "src/codegen/ppc/constants-ppc.cc", + "src/codegen/ppc/constants-ppc.h", + "src/codegen/ppc/cpu-ppc.cc", + "src/codegen/ppc/interface-descriptors-ppc.cc", + "src/codegen/ppc/macro-assembler-ppc.cc", + "src/codegen/ppc/macro-assembler-ppc.h", + "src/codegen/ppc/register-ppc.h", + "src/compiler/backend/ppc/code-generator-ppc.cc", + "src/compiler/backend/ppc/instruction-codes-ppc.h", + "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", + "src/compiler/backend/ppc/instruction-selector-ppc.cc", + "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", + "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", "src/debug/ppc/debug-ppc.cc", "src/deoptimizer/ppc/deoptimizer-ppc.cc", "src/diagnostics/ppc/disasm-ppc.cc", + "src/diagnostics/ppc/eh-frame-ppc.cc", "src/execution/ppc/frame-constants-ppc.cc", "src/execution/ppc/frame-constants-ppc.h", "src/execution/ppc/simulator-ppc.cc", diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni index b5fb1823b382e0..adda4717dd215f 100644 --- a/deps/v8/gni/snapshot_toolchain.gni +++ b/deps/v8/gni/snapshot_toolchain.gni @@ -89,6 +89,10 @@ if (v8_snapshot_toolchain == "") { } } else if (v8_current_cpu == "arm" || 
v8_current_cpu == "mipsel") { _cpus = "x86_v8_${v8_current_cpu}" + } else if (v8_current_cpu == "ppc") { + # reset clang to use gcc toolchain + _clang = "" + _cpus = "${v8_current_cpu}" } else { # This branch should not be reached; leave _cpus blank so the assert # below will fail. diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index f4300824ebe106..8d142c456c9cc1 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -33,13 +33,12 @@ #elif defined(__MIPSEB__) || defined(__MIPSEL__) #define V8_HOST_ARCH_MIPS 1 #define V8_HOST_ARCH_32_BIT 1 +#elif defined(__PPC64__) || defined(_ARCH_PPC64) +#define V8_HOST_ARCH_PPC64 1 +#define V8_HOST_ARCH_64_BIT 1 #elif defined(__PPC__) || defined(_ARCH_PPC) #define V8_HOST_ARCH_PPC 1 -#if defined(__PPC64__) || defined(_ARCH_PPC64) -#define V8_HOST_ARCH_64_BIT 1 -#else #define V8_HOST_ARCH_32_BIT 1 -#endif #elif defined(__s390__) || defined(__s390x__) #define V8_HOST_ARCH_S390 1 #if defined(__s390x__) @@ -78,7 +77,7 @@ // environment as presented by the compiler. 
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ - !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390 + !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 #if defined(_M_X64) || defined(__x86_64__) #define V8_TARGET_ARCH_X64 1 #elif defined(_M_IX86) || defined(__i386__) @@ -91,6 +90,8 @@ #define V8_TARGET_ARCH_MIPS64 1 #elif defined(__MIPSEB__) || defined(__MIPSEL__) #define V8_TARGET_ARCH_MIPS 1 +#elif defined(_ARCH_PPC64) +#define V8_TARGET_ARCH_PPC64 1 #elif defined(_ARCH_PPC) #define V8_TARGET_ARCH_PPC 1 #else @@ -118,11 +119,9 @@ #elif V8_TARGET_ARCH_MIPS64 #define V8_TARGET_ARCH_64_BIT 1 #elif V8_TARGET_ARCH_PPC -#if V8_TARGET_ARCH_PPC64 -#define V8_TARGET_ARCH_64_BIT 1 -#else #define V8_TARGET_ARCH_32_BIT 1 -#endif +#elif V8_TARGET_ARCH_PPC64 +#define V8_TARGET_ARCH_64_BIT 1 #elif V8_TARGET_ARCH_S390 #if V8_TARGET_ARCH_S390X #define V8_TARGET_ARCH_64_BIT 1 diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index 0b1514b5759aff..d82e8b2be14bba 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -16,7 +16,7 @@ #if V8_OS_QNX #include // cpuinfo #endif -#if V8_OS_LINUX && V8_HOST_ARCH_PPC +#if (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)) #include #endif #if V8_OS_AIX @@ -607,7 +607,7 @@ CPU::CPU() #elif V8_HOST_ARCH_ARM64 // Implementer, variant and part are currently unused under ARM64. 
-#elif V8_HOST_ARCH_PPC +#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 #ifndef USE_SIMULATOR #if V8_OS_LINUX @@ -659,6 +659,8 @@ CPU::CPU() part_ = PPC_G5; } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) { part_ = PPC_G4; + } else if (strcmp(auxv_cpu_type, "ppc440") == 0) { + part_ = PPC_G4; } else if (strcmp(auxv_cpu_type, "pa6t") == 0) { part_ = PPC_PA6T; } @@ -687,7 +689,7 @@ CPU::CPU() } #endif // V8_OS_AIX #endif // !USE_SIMULATOR -#endif // V8_HOST_ARCH_PPC +#endif // V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 } } // namespace base diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc index ed602af547d2f9..9cb1735c6fb1c3 100644 --- a/deps/v8/src/base/debug/stack_trace_posix.cc +++ b/deps/v8/src/base/debug/stack_trace_posix.cc @@ -25,7 +25,7 @@ #include #include -#if V8_LIBC_GLIBC || V8_LIBC_BSD || V8_LIBC_UCLIBC || V8_OS_SOLARIS +#if V8_LIBC_GLIBC || V8_LIBC_BSD || ( V8_LIBC_UCLIBC && __UCLIBC_HAS_BACKTRACE__ ) || V8_OS_SOLARIS #define HAVE_EXECINFO_H 1 #endif diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index c50cdd7a98eefd..5bfcc9936e787d 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -419,7 +419,7 @@ void OS::DebugBreak() { asm("break"); #elif V8_HOST_ARCH_MIPS64 asm("break"); -#elif V8_HOST_ARCH_PPC +#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 asm("twge 2,2"); #elif V8_HOST_ARCH_IA32 asm("int $3"); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 5ba797f8d78226..0f4ef499b080c8 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" @@ -2807,7 +2807,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } void Builtins::Generate_DoubleToI(MacroAssembler* masm) { - Label out_of_range, only_low, negate, done, fastpath_done; + Label out_of_range, only_low, negate, done, fastpath_done, conv_inv, conv_ok; Register result_reg = r3; HardAbortScope hard_abort(masm); // Avoid calls to Abort. @@ -2818,7 +2818,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Register scratch_high = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low); DoubleRegister double_scratch = kScratchDoubleReg; - +#if !V8_TARGET_ARCH_PPC64 + CRegister cr = cr7; + int crbit = v8::internal::Assembler::encode_crbit( + cr, static_cast(VXCVI % CRWIDTH)); +#endif __ Push(result_reg, scratch); // Account for saved regs. int argument_offset = 2 * kPointerSize; @@ -2827,17 +2831,20 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ lfd(double_scratch, MemOperand(sp, argument_offset)); // Do fast-path convert from double to int. 
- __ ConvertDoubleToInt64(double_scratch, #if !V8_TARGET_ARCH_PPC64 - scratch, -#endif - result_reg, d0); - -// Test for overflow -#if V8_TARGET_ARCH_PPC64 - __ TestIfInt32(result_reg, r0); + __ mtfsb0(VXCVI); + __ ConvertDoubleToInt32NoPPC64(double_scratch, result_reg, scratch); + __ mcrfs(cr, VXCVI); + __ bc(__ branch_offset(&conv_inv), BT, crbit); + __ addi(scratch, result_reg, Operand(0)); + __ b(&conv_ok); + __ bind(&conv_inv); + __ addi(scratch, result_reg, Operand(1)); + __ bind(&conv_ok); + __ cmp(scratch, result_reg, cr); #else - __ TestIfInt32(scratch, result_reg, r0); + __ ConvertDoubleToInt64(double_scratch, result_reg, d0); + __ TestIfInt32(result_reg, r0); #endif __ beq(&fastpath_done); @@ -3337,4 +3344,4 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h index cab4cbfc3bbcdf..d56b372504689d 100644 --- a/deps/v8/src/codegen/assembler-arch.h +++ b/deps/v8/src/codegen/assembler-arch.h @@ -15,7 +15,7 @@ #include "src/codegen/arm64/assembler-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/assembler-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/codegen/mips/assembler-mips.h" diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h index fd08a38555adf2..8c81315d50d1c1 100644 --- a/deps/v8/src/codegen/assembler-inl.h +++ b/deps/v8/src/codegen/assembler-inl.h @@ -15,7 +15,7 @@ #include "src/codegen/arm64/assembler-arm64-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/assembler-arm-inl.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc-inl.h" #elif V8_TARGET_ARCH_MIPS #include 
"src/codegen/mips/assembler-mips-inl.h" diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc index 6816c5b7ad580b..745def6a5c129b 100644 --- a/deps/v8/src/codegen/constant-pool.cc +++ b/deps/v8/src/codegen/constant-pool.cc @@ -9,7 +9,7 @@ namespace v8 { namespace internal { -#if defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits) { @@ -209,7 +209,7 @@ int ConstantPoolBuilder::Emit(Assembler* assm) { return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0; } -#endif // defined(V8_TARGET_ARCH_PPC) +#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) #if defined(V8_TARGET_ARCH_ARM64) diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h index d07452336b4e40..8823fbc4ad903c 100644 --- a/deps/v8/src/codegen/constant-pool.h +++ b/deps/v8/src/codegen/constant-pool.h @@ -81,7 +81,7 @@ class ConstantPoolEntry { enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 }; }; -#if defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) // ----------------------------------------------------------------------------- // Embedded constant pool support @@ -162,7 +162,7 @@ class ConstantPoolBuilder { PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES]; }; -#endif // defined(V8_TARGET_ARCH_PPC) +#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) #if defined(V8_TARGET_ARCH_ARM64) diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h index b49d2b64f23598..7a222c960ff42c 100644 --- a/deps/v8/src/codegen/constants-arch.h +++ b/deps/v8/src/codegen/constants-arch.h @@ -15,7 +15,7 @@ #include "src/codegen/mips/constants-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/constants-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || 
V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/constants-ppc.h" // NOLINT #elif V8_TARGET_ARCH_S390 #include "src/codegen/s390/constants-s390.h" // NOLINT diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc index 44503e532d1ed0..cca76e32192368 100644 --- a/deps/v8/src/codegen/external-reference.cc +++ b/deps/v8/src/codegen/external-reference.cc @@ -469,7 +469,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() { #define re_stack_check_func RegExpMacroAssemblerARM64::CheckStackGuardState #elif V8_TARGET_ARCH_ARM #define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState #elif V8_TARGET_ARCH_MIPS #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h index 0e588c08059933..e438d54910d60e 100644 --- a/deps/v8/src/codegen/macro-assembler.h +++ b/deps/v8/src/codegen/macro-assembler.h @@ -40,7 +40,7 @@ enum AllocationFlags { #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/constants-arm.h" #include "src/codegen/arm/macro-assembler-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/constants-ppc.h" #include "src/codegen/ppc/macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc index fa28d4ec50ec68..81d456174088e7 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc @@ -36,7 +36,7 @@ #include "src/codegen/ppc/assembler-ppc.h" -#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/base/bits.h" #include "src/base/cpu.h" @@ -1635,22 +1635,38 @@ void Assembler::fctiw(const DoubleRegister frt, const 
DoubleRegister frb) { void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb, @@ -1660,42 +1676,74 @@ void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb, void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { + #ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef 
V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef V8_TARGET_ARCH_PPC64 + assert(false); +#else emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc); +#endif } void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra, @@ -1737,7 +1785,18 @@ void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W, void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb, RCBit rc) { +#ifndef USE_SIMULATOR +#if V8_TARGET_ARCH_PPC + assert(false); +#endif +#else emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc); +#endif +} + +void Assembler::frsqrte(const DoubleRegister frt, const DoubleRegister frb, + RCBit rc) { + emit(EXT4 | FRSQRTE | frt.code() * B21 | frb.code() * B11 | rc); } void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb, @@ -1962,4 +2021,4 @@ Register UseScratchRegisterScope::Acquire() { } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h index 2fc3a295db7892..e07645ee5c6435 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc.h @@ -914,6 +914,8 @@ class Assembler : public AssemblerBase { RCBit rc = LeaveRC); void fsqrt(const DoubleRegister frt, const DoubleRegister frb, RCBit rc = LeaveRC); 
+ void frsqrte(const DoubleRegister frt, const DoubleRegister frb, + RCBit rc); void fabs(const DoubleRegister frt, const DoubleRegister frb, RCBit rc = LeaveRC); void fmadd(const DoubleRegister frt, const DoubleRegister fra, diff --git a/deps/v8/src/codegen/ppc/constants-ppc.cc b/deps/v8/src/codegen/ppc/constants-ppc.cc index 4cee2cbcb597e2..ee2e19aa24c4f0 100644 --- a/deps/v8/src/codegen/ppc/constants-ppc.cc +++ b/deps/v8/src/codegen/ppc/constants-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/constants-ppc.h" @@ -46,4 +46,4 @@ int Registers::Number(const char* name) { } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h index 2e499fd2c41357..2b481b18140b97 100644 --- a/deps/v8/src/codegen/ppc/constants-ppc.h +++ b/deps/v8/src/codegen/ppc/constants-ppc.h @@ -20,7 +20,7 @@ #define UNIMPLEMENTED_PPC() #endif -#if V8_HOST_ARCH_PPC && \ +#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \ (!defined(_CALL_ELF) || _CALL_ELF == 1))) #define ABI_USES_FUNCTION_DESCRIPTORS 1 @@ -28,28 +28,30 @@ #define ABI_USES_FUNCTION_DESCRIPTORS 0 #endif -#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64 +#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \ + V8_TARGET_ARCH_PPC64 #define ABI_PASSES_HANDLES_IN_REGS 1 #else #define ABI_PASSES_HANDLES_IN_REGS 0 #endif -#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN || \ - (defined(_CALL_ELF) && _CALL_ELF == 2) +#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || !V8_TARGET_ARCH_PPC64 || \ + V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2) #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 
1 #else #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0 #endif -#if !V8_HOST_ARCH_PPC || \ - (V8_TARGET_ARCH_PPC64 && \ +#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || \ + (V8_TARGET_ARCH_PPC64 && \ (V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2))) #define ABI_CALL_VIA_IP 1 #else #define ABI_CALL_VIA_IP 0 #endif -#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64 +#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \ + V8_TARGET_ARCH_PPC64 #define ABI_TOC_REGISTER 2 #else #define ABI_TOC_REGISTER 13 #endif @@ -1136,7 +1138,7 @@ using Instr = uint32_t; V(and_, ANDX, 0x7C000038) \ /* AND with Complement */ \ V(andc, ANDCX, 0x7C000078) \ - /* OR */ \ + /* OR */ \ V(orx, ORX, 0x7C000378) \ /* OR with Complement */ \ V(orc, ORC, 0x7C000338) \ @@ -1176,7 +1178,7 @@ using Instr = uint32_t; V(lhbrx, LHBRX, 0x7C00062C) \ /* Load Word Byte-Reverse Indexed */ \ V(lwbrx, LWBRX, 0x7C00042C) \ - /* Load Doubleword Byte-Reverse Indexed */ \ + /* Load Doubleword Byte-Reverse Indexed */ \ V(ldbrx, LDBRX, 0x7C000428) \ /* Load Byte and Zero Indexed */ \ V(lbzx, LBZX, 0x7C0000AE) \ @@ -1240,7 +1242,7 @@ using Instr = uint32_t; V(cmpl, CMPL, 0x7C000040) #define PPC_X_OPCODE_EH_S_FORM_LIST(V) \ - /* Store Byte Conditional Indexed */ \ + /* Store Byte Conditional Indexed */ \ V(stbcx, STBCX, 0x7C00056D) \ /* Store Halfword Conditional Indexed Xform */ \ V(sthcx, STHCX, 0x7C0005AD) \ @@ -1260,7 +1262,7 @@ using Instr = uint32_t; V(ldarx, LDARX, 0x7C0000A8) #define PPC_X_OPCODE_UNUSED_LIST(V) \ - /* Bit Permute Doubleword */ \ + /* Bit Permute Doubleword */ \ V(bpermd, BPERMD, 0x7C0001F8) \ /* Extend Sign Word */ \ V(extsw, EXTSW, 0x7C0007B4) \ @@ -1292,7 +1294,7 @@ using Instr = uint32_t; V(eqv, EQV, 0x7C000238) \ /* Instruction Cache Block Invalidate */ \ V(icbi, ICBI, 0x7C0007AC) \ - /* NAND */ \ + /* NAND */ \ V(nand, NAND, 0x7C0003B8) \ /* Parity Word */ \ V(prtyw, PRTYW, 0x7C000134) \ @@ -1518,7 +1520,7 @@ using 
Instr = uint32_t; V(stfdpx, STFDPX, 0x7C00072E) \ /* Floating Absolute Value */ \ V(fabs, FABS, 0xFC000210) \ - /* Floating Convert From Integer Doubleword */ \ + /* Floating Convert From Integer Doubleword */ \ V(fcfid, FCFID, 0xFC00069C) \ /* Floating Convert From Integer Doubleword Single */ \ V(fcfids, FCFIDS, 0xEC00069C) \ @@ -1539,7 +1541,7 @@ using Instr = uint32_t; V(fctidz, FCTIDZ, 0xFC00065E) \ /* Floating Convert To Integer Word */ \ V(fctiw, FCTIW, 0xFC00001C) \ - /* Floating Convert To Integer Word Unsigned */ \ + /* Floating Convert To Integer Word Unsigned */ \ V(fctiwu, FCTIWU, 0xFC00011C) \ /* Floating Convert To Integer Word Unsigned with round toward Zero */ \ V(fctiwuz, FCTIWUZ, 0xFC00011E) \ @@ -1547,13 +1549,13 @@ using Instr = uint32_t; V(fctiwz, FCTIWZ, 0xFC00001E) \ /* Floating Move Register */ \ V(fmr, FMR, 0xFC000090) \ - /* Floating Negative Absolute Value */ \ + /* Floating Negative Absolute Value */ \ V(fnabs, FNABS, 0xFC000110) \ /* Floating Negate */ \ V(fneg, FNEG, 0xFC000050) \ /* Floating Round to Single-Precision */ \ V(frsp, FRSP, 0xFC000018) \ - /* Move From FPSCR */ \ + /* Move From FPSCR */ \ V(mffs, MFFS, 0xFC00048E) \ /* Move To FPSCR Bit 0 */ \ V(mtfsb0, MTFSB0, 0xFC00008C) \ @@ -1749,17 +1751,17 @@ using Instr = uint32_t; #define PPC_DQ_OPCODE_LIST(V) V(lsq, LSQ, 0xE0000000) #define PPC_D_OPCODE_LIST(V) \ - /* Trap Doubleword Immediate */ \ + /* Trap Doubleword Immediate */ \ V(tdi, TDI, 0x08000000) \ /* Add Immediate */ \ V(addi, ADDI, 0x38000000) \ /* Add Immediate Carrying */ \ V(addic, ADDIC, 0x30000000) \ - /* Add Immediate Carrying & record CR0 */ \ + /* Add Immediate Carrying & record CR0 */ \ V(addicx, ADDICx, 0x34000000) \ /* Add Immediate Shifted */ \ V(addis, ADDIS, 0x3C000000) \ - /* AND Immediate & record CR0 */ \ + /* AND Immediate & record CR0 */ \ V(andix, ANDIx, 0x70000000) \ /* AND Immediate Shifted & record CR0 */ \ V(andisx, ANDISx, 0x74000000) \ @@ -1843,17 +1845,17 @@ using Instr = uint32_t; V(mfspr, MFSPR, 0x7C0002A6) \ /* Move To Condition 
Register Fields */ \ V(mtcrf, MTCRF, 0x7C000120) \ - /* Move To One Condition Register Field */ \ + /* Move To One Condition Register Field */ \ V(mtocrf, MTOCRF, 0x7C100120) \ /* Move To Special Purpose Register */ \ V(mtspr, MTSPR, 0x7C0003A6) \ - /* Debugger Notify Halt */ \ + /* Debugger Notify Halt */ \ V(dnh, DNH, 0x4C00018C) \ /* Move From Device Control Register */ \ V(mfdcr, MFDCR, 0x7C000286) \ /* Move To Device Control Register */ \ V(mtdcr, MTDCR, 0x7C000386) \ - /* Move from Performance Monitor Register */ \ + /* Move from Performance Monitor Register */ \ V(mfpmr, MFPMR, 0x7C00029C) \ /* Move To Performance Monitor Register */ \ V(mtpmr, MTPMR, 0x7C00039C) \ @@ -1905,7 +1907,7 @@ using Instr = uint32_t; V(frsqrte, FRSQRTE, 0xFC000034) \ /* Floating Select */ \ V(fsel, FSEL, 0xFC00002E) \ - /* Floating Square Root */ \ + /* Floating Square Root */ \ V(fsqrt, FSQRT, 0xFC00002C) \ /* Floating Square Root Single */ \ V(fsqrts, FSQRTS, 0xEC00002C) \ @@ -1993,7 +1995,7 @@ using Instr = uint32_t; V(bc, BCX, 0x40000000) #define PPC_XO_OPCODE_LIST(V) \ - /* Divide Doubleword */ \ + /* Divide Doubleword */ \ V(divd, DIVD, 0x7C0003D2) \ /* Divide Doubleword Extended */ \ V(divde, DIVDE, 0x7C000352) \ @@ -2039,7 +2041,7 @@ using Instr = uint32_t; V(addzeo, ADDZEO, 0x7C000594) \ /* Divide Word Format */ \ V(divw, DIVW, 0x7C0003D6) \ - /* Divide Word Extended */ \ + /* Divide Word Extended */ \ V(divwe, DIVWE, 0x7C000356) \ /* Divide Word Extended & record OV */ \ V(divweo, DIVWEO, 0x7C000756) \ @@ -2085,7 +2087,7 @@ using Instr = uint32_t; V(subfze, SUBFZE, 0x7C000190) \ /* Subtract From Zero Extended & record OV */ \ V(subfzeo, SUBFZEO, 0x7C000590) \ - /* Add and Generate Sixes */ \ + /* Add and Generate Sixes */ \ V(addg, ADDG, 0x7C000094) \ /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \ V(macchw, MACCHW, 0x10000158) \ @@ -2989,6 +2991,11 @@ class DoubleRegisters { private: static const char* 
names_[kNumDoubleRegisters]; }; + +static constexpr int kR0DwarfCode = 0; +static constexpr int kFpDwarfCode = 31; // frame-pointer +static constexpr int kLrDwarfCode = 65; // return-address(lr) +static constexpr int kSpDwarfCode = 1; // stack-pointer (sp) } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/ppc/cpu-ppc.cc b/deps/v8/src/codegen/ppc/cpu-ppc.cc index 243fa29a4675a9..9559af7778291a 100644 --- a/deps/v8/src/codegen/ppc/cpu-ppc.cc +++ b/deps/v8/src/codegen/ppc/cpu-ppc.cc @@ -4,7 +4,7 @@ // CPU specific code for ppc independent of OS goes here. -#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/cpu-features.h" @@ -47,4 +47,4 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) { } // namespace v8 #undef INSTR_AND_DATA_CACHE_COHERENCY -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc index 3d378d7a43870f..f2264b05fa91a5 100644 --- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc +++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/interface-descriptors.h" @@ -286,4 +286,4 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 51688606bad9da..68128e146946be 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -5,7 +5,7 @@ #include // For assert #include // For LONG_MIN, LONG_MAX. 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/base/bits.h" #include "src/base/division-by-constant.h" @@ -24,6 +24,16 @@ #include "src/snapshot/snapshot.h" #include "src/wasm/wasm-code-manager.h" +#include + +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN +#define LO_WORD_OFFSET 0 +#define HI_WORD_OFFSET 4 +#else +#define LO_WORD_OFFSET 4 +#define HI_WORD_OFFSET 0 +#endif + // Satisfy cpplint check, but don't include platform-specific header. It is // included recursively via macro-assembler.h. #if 0 @@ -693,26 +703,193 @@ void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst, fsub(dst, src, kDoubleRegZero); } + +void TurboAssembler::TruncateApproximatedDouble( + DoubleRegister dst, DoubleRegister src, + unsigned int bits) { + Register scratch = kScratchReg; + + subi(sp, sp, Operand(kDoubleSize)); + stfd(src, MemOperand(sp, 0)); + lwz(scratch, MemOperand(sp, LO_WORD_OFFSET)); + rlwinm(scratch, scratch, 0, 0, 31-bits); + stw(scratch, MemOperand(sp, LO_WORD_OFFSET)); + lfd(dst, MemOperand(sp, 0)); + addi(sp, sp, Operand(kDoubleSize)); +} + +void TurboAssembler::RsqrtNewtonStep( + DoubleRegister dst, DoubleRegister src, + RCBit rc) { + DoubleRegister Dscratch = d11; + DoubleRegister Dscratch2 = d12; + DoubleRegister Dscratch3 = kScratchDoubleReg; + Register scratch = kScratchReg; + + /* fast 1/sqrt(x) estimated */ + frsqrte(Dscratch3, src, rc); + + /* Two Newton method iterations over result to improve accuracy + https://en.wikipedia.org/wiki/Fast_inverse_square_root + y_n+1 = y_n(1.5 - 0.5*x*y_n^2) + */ + subi(sp, sp, Operand(2*kFloatSize)); + + lis(scratch, Operand(0x3f00)); + stw(scratch, MemOperand(sp, 0)); + lfs(Dscratch, MemOperand(sp, 0)); + lis(scratch, Operand(0x3fc0)); + stw(scratch, MemOperand(sp, kFloatSize)); + lfs(Dscratch2, MemOperand(sp, kFloatSize)); + + fmul(Dscratch, Dscratch, src); + fmul(Dscratch, Dscratch, Dscratch3); + fmul(Dscratch, Dscratch, Dscratch3); + + fsub(Dscratch2, Dscratch2, Dscratch); + 
fmul(Dscratch3, Dscratch3, Dscratch2); + + lfs(Dscratch, MemOperand(sp, 0)); + + fmul(Dscratch, Dscratch, src); + fmul(Dscratch, Dscratch, Dscratch3); + fmul(Dscratch, Dscratch, Dscratch3); + + lfs(Dscratch2, MemOperand(sp, kFloatSize)); + + fsub(Dscratch2, Dscratch2, Dscratch); + fmul(dst, Dscratch3, Dscratch2); + +/* Calculate 1/x */ + lis(scratch, Operand(0x3f80)); + stw(scratch, MemOperand(sp, 0)); + lfs(Dscratch, MemOperand(sp, 0)); + fdiv(dst, Dscratch, dst, rc); + + addi(sp, sp, Operand(2*kFloatSize)); + + TruncateApproximatedDouble(dst, dst, 16); +} + +void TurboAssembler::ConvertDoubleToInt32NoPPC64( + DoubleRegister src, + Register dest, Register dest_hi, + FPRoundingMode rounding_mode) { + + DoubleRegister double_scratch = kScratchDoubleReg; + + if (rounding_mode == kRoundToZero) { + fctiwz(src, src); + fctiwz(double_scratch, src); + } else { + SetRoundingMode(rounding_mode); + fctiw(src, src); + fctiw(double_scratch, src); + ResetRoundingMode(); + } + subi(sp, sp, Operand(kDoubleSize)); + stfd(double_scratch, MemOperand(sp, 0)); + lwz(dest, MemOperand(sp, LO_WORD_OFFSET)); + lwz(dest_hi, MemOperand(sp, HI_WORD_OFFSET)); + addi(sp, sp, Operand(kDoubleSize)); +} + +void TurboAssembler::ConvertIntToFloatingPointNoPPC64(Register src, + DoubleRegister double_dst, + bool result_is_a_float, + bool src_is_unsigned) { + + Register scratch = kScratchReg; + DoubleRegister double_scratch = kScratchDoubleReg; + + subi(sp, sp, Operand(kDoubleSize)); // reserve one temporary double on the stack + + // sign-extend src to 64-bit and store it to temp double on the stack +#if V8_TARGET_ARCH_PPC64 + extsw(r0, src); + std(r0, MemOperand(sp, 0)); +#else + srawi(r0, src, 31); + stw(r0, MemOperand(sp, HI_WORD_OFFSET)); + stw(src, MemOperand(sp, LO_WORD_OFFSET)); +#endif + + if (src_is_unsigned) { + // load 0x4330000000000000 into double_scratch + lis(scratch, Operand(0x4330)); + stw(scratch, MemOperand(sp, HI_WORD_OFFSET)); + lis(scratch, Operand::Zero()); + stw(scratch, 
MemOperand(sp, LO_WORD_OFFSET)); + lfd(double_scratch, MemOperand(sp, 0)); + + // load 0x1.00000dddddddd x10^D into double_dst + lis(scratch, Operand(0x4330)); + stw(scratch, MemOperand(sp, HI_WORD_OFFSET)); + stw(src, MemOperand(sp, LO_WORD_OFFSET)); + } else { + // load 0x4330000080000000 into double_scratch + lis(scratch, Operand(0x4330)); + stw(scratch, MemOperand(sp, HI_WORD_OFFSET)); + lis(scratch, Operand(-0x8000)); + stw(scratch, MemOperand(sp, LO_WORD_OFFSET)); + lfd(double_scratch, MemOperand(sp, 0)); + + // load into FPR + // load 0x1.00000dddddddd x10^D into double_dst + lis(scratch, Operand(0x4330)); + stw(scratch, MemOperand(sp, HI_WORD_OFFSET)); + xoris(scratch, src, Operand(0x8000)); + stw(scratch, MemOperand(sp, LO_WORD_OFFSET)); + } + + // Convert to double word FP from stack + lfd(double_dst, MemOperand(sp, 0)); + fsub(double_dst, double_dst, double_scratch); + + addi(sp, sp, Operand(kDoubleSize)); // restore stack + + if (result_is_a_float) { + // Round to single word FP + frsp(double_dst, double_dst); + } +} + void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { +#ifdef V8_TARGET_ARCH_PPC64 MovIntToDouble(dst, src, r0); fcfid(dst, dst); +#else + ConvertIntToFloatingPointNoPPC64(src, dst, false, false); +#endif } void TurboAssembler::ConvertUnsignedIntToDouble(Register src, DoubleRegister dst) { +#ifdef V8_TARGET_ARCH_PPC64 MovUnsignedIntToDouble(dst, src, r0); fcfid(dst, dst); +#else + ConvertIntToFloatingPointNoPPC64(src, dst, false, true); +#endif } void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { - MovIntToDouble(dst, src, r0); - fcfids(dst, dst); +#ifdef V8_TARGET_ARCH_PPC64 + MovUnsignedIntToDouble(dst, src, r0); + fcfid(dst, dst); +#else + ConvertIntToFloatingPointNoPPC64(src, dst, true, false); +#endif } void TurboAssembler::ConvertUnsignedIntToFloat(Register src, DoubleRegister dst) { +#ifdef V8_TARGET_ARCH_PPC64 MovUnsignedIntToDouble(dst, src, r0); fcfids(dst, dst); +#else + 
ConvertIntToFloatingPointNoPPC64(src, dst, true, true); +#endif } #if V8_TARGET_ARCH_PPC64 @@ -748,6 +925,7 @@ void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode) { +#if V8_TARGET_ARCH_PPC64 if (rounding_mode == kRoundToZero) { fctidz(double_dst, double_input); } else { @@ -755,6 +933,15 @@ void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, fctid(double_dst, double_input); ResetRoundingMode(); } +#else + if (rounding_mode == kRoundToZero) { + fctiwz(double_dst, double_input); + } else { + SetRoundingMode(rounding_mode); + fctiw(double_dst, double_input); + ResetRoundingMode(); + } +#endif MovDoubleToInt64( #if !V8_TARGET_ARCH_PPC64 @@ -1604,24 +1791,29 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, void TurboAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { - DoubleRegister double_scratch = kScratchDoubleReg; -#if !V8_TARGET_ARCH_PPC64 - Register scratch = ip; -#endif - - ConvertDoubleToInt64(double_input, + Label conv_inv, conv_ok; #if !V8_TARGET_ARCH_PPC64 - scratch, -#endif - result, double_scratch); - -// Test for overflow -#if V8_TARGET_ARCH_PPC64 - TestIfInt32(result, r0); + Register scratch = kScratchReg; + CRegister cr = cr7; + int crbit = v8::internal::Assembler::encode_crbit( + cr, static_cast(VXCVI % CRWIDTH)); + mtfsb0(VXCVI); + ConvertDoubleToInt32NoPPC64(double_input, result, scratch); + mcrfs(cr, VXCVI); + bc(branch_offset(&conv_inv), BT, crbit); + addi(scratch, result, Operand(0)); + b(&conv_ok); + bind(&conv_inv); + li(result, Operand(0)); + addi(scratch, result, Operand(1)); + bind(&conv_ok); + cmp(scratch, result, cr); #else - TestIfInt32(scratch, result, r0); + DoubleRegister double_scratch = kScratchDoubleReg; + ConvertDoubleToInt64(double_input, result, double_scratch); + TestIfInt32(result, r0); #endif - beq(done); + b(done); } void 
TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid, @@ -2446,7 +2638,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi, LoadSmiLiteral(scratch, smi); add(dst, src, scratch); #else - Add(dst, src, reinterpret_cast(smi), scratch); + Add(dst, src, smi.value(), scratch); #endif } @@ -2456,7 +2648,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi, LoadSmiLiteral(scratch, smi); sub(dst, src, scratch); #else - Add(dst, src, -(reinterpret_cast(smi)), scratch); + Add(dst, src, -smi.value(), scratch); #endif } @@ -2489,9 +2681,17 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem, // Todo: enhance to use scratch if dst is unsuitable DCHECK_NE(dst, r0); addi(dst, mem.ra(), Operand(adj)); +#if V8_TARGET_ARCH_PPC64 ld(dst, MemOperand(dst, alignedOffset)); +#else + lwz(dst, MemOperand(dst, alignedOffset)); +#endif } else { +#if V8_TARGET_ARCH_PPC64 ld(dst, mem); +#else + lwz(dst, mem); +#endif } } } @@ -2953,15 +3153,26 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { } void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { - STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); +#if V8_TARGET_ARCH_PPC64 + STATIC_ASSERT(kSystemPointerSize == 8); + STATIC_ASSERT(kSmiShiftSize == 31); +#else + STATIC_ASSERT(kSystemPointerSize == 4); + STATIC_ASSERT(kSmiShiftSize == 0); +#endif STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
+#ifdef V8_TARGET_ARCH_PPC64 ShiftRightArithImm(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); +#else + ShiftLeftImm(builtin_index, builtin_index, + Operand(kSystemPointerSizeLog2 - kSmiTagSize)); +#endif addi(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index)); @@ -2987,7 +3198,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination, DCHECK(root_array_available()); Label if_code_is_off_heap, out; - Register scratch = r11; + Register scratch = kScratchReg; DCHECK(!AreAliased(destination, scratch)); DCHECK(!AreAliased(code_object, scratch)); @@ -3076,18 +3287,30 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) { } void TurboAssembler::ZeroExtByte(Register dst, Register src) { +#if V8_TARGET_ARCH_PPC64 clrldi(dst, src, Operand(56)); +#else + clrlwi(dst, src, Operand(24)); +#endif } void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) { +#if V8_TARGET_ARCH_PPC64 clrldi(dst, src, Operand(48)); +#else + clrlwi(dst, src, Operand(16)); +#endif } void TurboAssembler::ZeroExtWord32(Register dst, Register src) { +#if V8_TARGET_ARCH_PPC64 clrldi(dst, src, Operand(32)); +#else + clrlwi(dst, src, Operand(32)); +#endif } } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index b2bbecfaec0046..1fba54ca090534 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -69,6 +69,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { public: using TurboAssemblerBase::TurboAssemblerBase; + #if V8_TARGET_ARCH_PPC + void ConvertIntToFloatingPointNoPPC64(Register src, + DoubleRegister double_dst, + bool result_is_a_float, + bool src_is_unsigned); + void 
ConvertDoubleToInt32NoPPC64(DoubleRegister src, + Register dest, + Register dest_hi, + FPRoundingMode rounding_mode = kRoundToZero); + void RsqrtNewtonStep(DoubleRegister dst, DoubleRegister src, RCBit rc); + void TruncateApproximatedDouble(DoubleRegister dst, DoubleRegister src, unsigned int bits); + #endif + // Converts the integer (untagged smi) in |src| to a double, storing // the result to |dst| void ConvertIntToDouble(Register src, DoubleRegister dst); @@ -585,13 +598,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { extsw(scratch, value); cmp(scratch, value, cr); } -#else - inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch, - CRegister cr = cr7) { - // High bits must be identical to fit into an 32-bit integer - srawi(scratch, lo_word, 31); - cmp(scratch, hi_word, cr); - } #endif // Overflow handling functions. diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h index 63a9fd803cd971..1aa1d338eb1efb 100644 --- a/deps/v8/src/codegen/ppc/register-ppc.h +++ b/deps/v8/src/codegen/ppc/register-ppc.h @@ -145,7 +145,11 @@ const int kNumSafepointRegisters = 32; // The following constants describe the stack frame linkage area as // defined by the ABI. Note that kNumRequiredStackFrameSlots must // satisfy alignment requirements (rounding up if required). 
-#if V8_TARGET_ARCH_PPC64 && \ +#if V8_TARGET_ARCH_PPC +const int kNumRequiredStackFrameSlots = 4; +const int kStackFrameLRSlot = 1; +const int kStackFrameExtraParamSlot = 2; +#elif V8_TARGET_ARCH_PPC64 && \ (V8_TARGET_LITTLE_ENDIAN || \ (defined(_CALL_ELF) && _CALL_ELF == 2)) // ELFv2 ABI // [0] back chain @@ -300,6 +304,7 @@ constexpr Register kContextRegister = r30; constexpr Register kAllocateSizeRegister = r4; constexpr Register kSpeculationPoisonRegister = r14; constexpr Register kInterpreterAccumulatorRegister = r3; +constexpr Register kScratchReg = r11; constexpr Register kInterpreterBytecodeOffsetRegister = r15; constexpr Register kInterpreterBytecodeArrayRegister = r16; constexpr Register kInterpreterDispatchTableRegister = r17; diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h index aa668a9158eb21..21a72330169cd7 100644 --- a/deps/v8/src/codegen/register-arch.h +++ b/deps/v8/src/codegen/register-arch.h @@ -16,7 +16,7 @@ #include "src/codegen/arm64/register-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/register-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/register-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/codegen/mips/register-mips.h" diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc index c8f768e6dea83b..5752b463392ea3 100644 --- a/deps/v8/src/codegen/register-configuration.cc +++ b/deps/v8/src/codegen/register-configuration.cc @@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() { kMaxAllocatableDoubleRegisterCount; #elif V8_TARGET_ARCH_PPC kMaxAllocatableDoubleRegisterCount; +#elif V8_TARGET_ARCH_PPC64 + kMaxAllocatableDoubleRegisterCount; #elif V8_TARGET_ARCH_S390 kMaxAllocatableDoubleRegisterCount; #else diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc index a889a8b9c7bfea..63a6ab99a75afc 100644 --- 
a/deps/v8/src/codegen/reloc-info.cc +++ b/deps/v8/src/codegen/reloc-info.cc @@ -329,7 +329,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { return false; #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ - defined(V8_TARGET_ARCH_S390) + defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) return true; #endif } diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index ac48a5a1bc3108..aba3cb3f87f353 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -42,6 +42,9 @@ namespace internal { #if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC) #define USE_SIMULATOR 1 #endif +#if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64) +#define USE_SIMULATOR 1 +#endif #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS) #define USE_SIMULATOR 1 #endif @@ -55,7 +58,7 @@ namespace internal { // Determine whether the architecture uses an embedded constant pool // (contiguous constant pool embedded in code object). 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #define V8_EMBEDDED_CONSTANT_POOL true #else #define V8_EMBEDDED_CONSTANT_POOL false @@ -179,7 +182,8 @@ constexpr int kSystemPointerSizeLog2 = 3; constexpr intptr_t kIntptrSignBit = static_cast(uintptr_t{0x8000000000000000}); constexpr bool kRequiresCodeRange = true; -#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX +#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \ + (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX constexpr size_t kMaximalCodeRangeSize = 512 * MB; constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux #elif V8_TARGET_ARCH_ARM64 @@ -199,7 +203,8 @@ constexpr size_t kReservedCodeRangePages = 0; #else constexpr int kSystemPointerSizeLog2 = 2; constexpr intptr_t kIntptrSignBit = 0x80000000; -#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX +#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \ + (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX constexpr bool kRequiresCodeRange = false; constexpr size_t kMaximalCodeRangeSize = 0 * MB; constexpr size_t kMinimumCodeRangeSize = 0 * MB; @@ -332,7 +337,7 @@ F FUNCTION_CAST(Address addr) { // Determine whether the architecture uses function descriptors // which provide a level of indirection between the function pointer // and the function entrypoint. 
-#if V8_HOST_ARCH_PPC && \ +#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \ (!defined(_CALL_ELF) || _CALL_ELF == 1))) #define USES_FUNCTION_DESCRIPTORS 1 diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h index 589c1bda3b1509..e9be4e124dc7a3 100644 --- a/deps/v8/src/compiler/backend/instruction-codes.h +++ b/deps/v8/src/compiler/backend/instruction-codes.h @@ -21,6 +21,8 @@ #include "src/compiler/backend/x64/instruction-codes-x64.h" #elif V8_TARGET_ARCH_PPC #include "src/compiler/backend/ppc/instruction-codes-ppc.h" +#elif V8_TARGET_ARCH_PPC64 +#include "src/compiler/backend/ppc64/instruction-codes-ppc64.h" #elif V8_TARGET_ARCH_S390 #include "src/compiler/backend/s390/instruction-codes-s390.h" #else diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index d565a4696395db..44053b8899a8dd 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -2376,8 +2376,6 @@ void InstructionSelector::VisitWord64ReverseBits(Node* node) { void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); } - void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { @@ -2394,22 +2392,10 @@ void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); } - -void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { - UNIMPLEMENTED(); -} - void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); } -void 
InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); } - -void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { - UNIMPLEMENTED(); -} - void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { @@ -2539,7 +2525,8 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); } #endif // V8_TARGET_ARCH_64_BIT -#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS +#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \ + !V8_TARGET_ARCH_PPC void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { UNIMPLEMENTED(); } @@ -2576,9 +2563,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { UNIMPLEMENTED(); } #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS + // && !V8_TARGET_ARCH_PPC #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ - !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC + !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicStore(Node* node) { @@ -2602,8 +2590,9 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) { void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { UNIMPLEMENTED(); } -#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC - // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 +#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && + // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 && + // !V8_TARGET_ARCH_PPC64 #if !V8_TARGET_ARCH_X64 #if !V8_TARGET_ARCH_ARM64 diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index d91d9f1e9a95a8..77d6d10642e471 100644 --- 
a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -17,14 +17,14 @@ #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-objects.h" +#include + namespace v8 { namespace internal { namespace compiler { #define __ tasm()-> -#define kScratchReg r11 - // Adds PPC-specific methods to convert InstructionOperands. class PPCOperandConverter final : public InstructionOperandConverter { public: @@ -674,6 +674,50 @@ void EmitWordLoadPoisoningIfNeeded( __ sync(); \ } while (false) +#define ASSEMBLE_ATOMIC_PAIR_BINOP(instr) \ + do { \ + Label binop; \ + __ lwsync(); \ + __ bind(&binop); \ + MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \ + __ lwarx(i.OutputRegister(0), operand); \ + __ instr(kScratchReg, i.OutputRegister(0), i.InputRegister(2)); \ + __ stwcx(kScratchReg, operand); \ + __ bne(&binop, cr0); \ + __ addi(i.TempRegister(0), i.InputRegister(1), Operand(4)); \ + operand = MemOperand(i.InputRegister(0), i.TempRegister(0)); \ + __ lwarx(i.OutputRegister(1), operand); \ + __ instr(kScratchReg, i.OutputRegister(1), i.InputRegister(3)); \ + __ stwcx(kScratchReg, operand); \ + __ bne(&binop, cr0); \ + __ sync(); \ + } while (false) + +#define ASSEMBLE_ATOMIC_PAIR_COMPARE_EXCHANGE(instr) \ + do { \ + Label loop; \ + Label exit; \ + __ ZeroExtWord32(r0, i.InputRegister(2)); \ + __ ZeroExtWord32(r0, i.InputRegister(3)); \ + __ lwsync(); \ + __ bind(&loop); \ + MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \ + __ lwarx(i.OutputRegister(0), operand); \ + __ cmpw(i.OutputRegister(0), r0, cr0); \ + __ bne(&exit, cr0); \ + __ stwcx(i.InputRegister(3), operand); \ + __ bne(&loop, cr0); \ + __ addi(i.TempRegister(0), i.InputRegister(1), Operand(4)); \ + operand = MemOperand(i.InputRegister(0), i.TempRegister(0)); \ + __ lwarx(i.OutputRegister(1), operand); \ + __ cmpw(i.OutputRegister(1), r0, cr0); \ + __ bne(&exit, cr0); \ + __ 
stwcx(i.InputRegister(2), operand); \ + __ bne(&loop, cr0); \ + __ bind(&exit); \ + __ sync(); \ + } while (false) + void CodeGenerator::AssembleDeconstructFrame() { __ LeaveFrame(StackFrame::MANUAL); } @@ -1141,11 +1185,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmpl(sp, i.InputRegister(kValueIndex), cr0); break; } - case kArchTruncateDoubleToI: + case kArchTruncateDoubleToI: { __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); DCHECK_EQ(LeaveRC, i.OutputRCBit()); break; + } case kArchStoreWithWriteBarrier: { RecordWriteMode mode = static_cast(MiscField::decode(instr->opcode())); @@ -1628,7 +1673,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_FLOAT_UNOP_RC(fabs, 0); break; case kPPC_SqrtDouble: +#ifdef V8_TARGET_ARCH_PPC64 ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode())); +#else + __ RsqrtNewtonStep(i.OutputDoubleRegister(), i.InputDoubleRegister(0), i.OutputRCBit()); + if (MiscField::decode(instr->opcode())) { + __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister()); + } +#endif break; case kPPC_FloorDouble: ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode())); @@ -1668,11 +1720,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kPPC_Cmp32: ASSEMBLE_COMPARE(cmpw, cmplw); break; -#if V8_TARGET_ARCH_PPC64 case kPPC_Cmp64: ASSEMBLE_COMPARE(cmp, cmpl); break; -#endif case kPPC_CmpDouble: ASSEMBLE_FLOAT_COMPARE(fcmpu); break; @@ -1820,6 +1870,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kPPC_DoubleToInt32: case kPPC_DoubleToUint32: +#if !V8_TARGET_ARCH_PPC64 + { + Label conv_inv, conv_ok; + Register scratch = kScratchReg; + CRegister cr = cr7; + int crbit = v8::internal::Assembler::encode_crbit( + cr, static_cast(VXCVI % CRWIDTH)); + __ mtfsb0(VXCVI); + __ ConvertDoubleToInt32NoPPC64(i.InputDoubleRegister(0), i.OutputRegister(), 
scratch); + __ mcrfs(cr, VXCVI); + __ bc(__ branch_offset(&conv_inv), BT, crbit); + __ addi(scratch, i.OutputRegister(), Operand(0)); + __ b(&conv_ok); + __ bind(&conv_inv); + __ li(i.OutputRegister(), Operand(0)); + __ addi(scratch, i.OutputRegister(), Operand(1)); + __ bind(&conv_ok); + __ cmp(scratch, i.OutputRegister(), cr); + break; + } +#endif case kPPC_DoubleToInt64: { #if V8_TARGET_ARCH_PPC64 bool check_conversion = @@ -1990,11 +2061,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kWord32AtomicLoadInt16: case kPPC_AtomicLoadUint16: case kPPC_AtomicLoadWord32: - case kPPC_AtomicLoadWord64: case kPPC_AtomicStoreUint8: case kPPC_AtomicStoreUint16: case kPPC_AtomicStoreWord32: +#if V8_TARGET_ARCH_PPC64 + case kPPC_AtomicLoadWord64: case kPPC_AtomicStoreWord64: +#endif UNREACHABLE(); case kWord32AtomicExchangeInt8: ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx); @@ -2013,9 +2086,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kPPC_AtomicExchangeWord32: ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx); break; +#if V8_TARGET_ARCH_PPC64 case kPPC_AtomicExchangeWord64: ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx); break; +#endif case kWord32AtomicCompareExchangeInt8: ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb); break; @@ -2031,11 +2106,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kPPC_AtomicCompareExchangeWord32: ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32); break; +#if V8_TARGET_ARCH_PPC64 case kPPC_AtomicCompareExchangeWord64: ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx, mr); break; +#endif -#define ATOMIC_BINOP_CASE(op, inst) \ +#define ATOMIC_BINOP_CASE_COMMON(op, inst) \ case kPPC_Atomic##op##Int8: \ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \ break; \ @@ -2053,11 +2130,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; \ case kPPC_Atomic##op##Uint32: \ 
ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \ - break; \ - case kPPC_Atomic##op##Int64: \ - case kPPC_Atomic##op##Uint64: \ - ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \ break; + +#if V8_TARGET_ARCH_PPC64 +#define ATOMIC_BINOP_CASE(op, inst) \ + ATOMIC_BINOP_CASE_COMMON(op, inst) \ + case kPPC_Atomic##op##Int64: \ + case kPPC_Atomic##op##Uint64: \ + ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \ + break; +#else +#define ATOMIC_BINOP_CASE(op, inst) ATOMIC_BINOP_CASE_COMMON(op, inst) +#endif ATOMIC_BINOP_CASE(Add, add) ATOMIC_BINOP_CASE(Sub, sub) ATOMIC_BINOP_CASE(And, and_) @@ -2068,7 +2152,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kPPC_ByteRev32: { Register input = i.InputRegister(0); Register output = i.OutputRegister(); +#if V8_TARGET_ARCH_PPC64 Register temp1 = r0; +#else + Register temp1 = output; +#endif __ rotlwi(temp1, input, 8); __ rlwimi(temp1, input, 24, 0, 7); __ rlwimi(temp1, input, 24, 16, 23); @@ -2094,8 +2182,85 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } #endif // V8_TARGET_ARCH_PPC64 +#ifdef V8_TARGET_ARCH_PPC + case kPPC_AtomicPairLoadWord32: { + Label atomic_pair_load; + __ lwsync(); + __ bind(&atomic_pair_load); + __ addi(i.TempRegister(0), i.InputRegister(1), Operand(4)); + __ lwarx(i.OutputRegister(0), + MemOperand(i.InputRegister(0), i.InputRegister(1))); + __ lwsync(); + __ lwz(i.OutputRegister(1), + MemOperand(i.InputRegister(0), i.TempRegister(0))); + __ lwsync(); + __ stwcx(i.OutputRegister(0), + MemOperand(i.InputRegister(0), i.InputRegister(1))); + __ bne(&atomic_pair_load, cr0); + __ sync(); + break; + } + case kPPC_AtomicPairStoreWord32: { + Label atomic_pair_store; + __ lwsync(); + __ bind(&atomic_pair_store); + __ addi(i.TempRegister(0), i.InputRegister(1), Operand(4)); + __ lwarx(kScratchReg, MemOperand(i.InputRegister(0), i.InputRegister(1))); + __ lwsync(); + __ stw(i.InputRegister(3), + MemOperand(i.InputRegister(0), i.TempRegister(0))); + __ lwsync(); + __ 
stwcx(i.InputRegister(2), + MemOperand(i.InputRegister(0), i.InputRegister(1))); + __ bne(&atomic_pair_store, cr0); + __ sync(); + DCHECK_EQ(LeaveRC, i.OutputRCBit()); + break; + } + case kPPC_AtomicPairAddWord32: { + ASSEMBLE_ATOMIC_PAIR_BINOP(add); + break; + } + case kPPC_AtomicPairSubWord32: { + ASSEMBLE_ATOMIC_PAIR_BINOP(sub); + break; + } + case kPPC_AtomicPairAndWord32: { + ASSEMBLE_ATOMIC_PAIR_BINOP(and_); + break; + } + case kPPC_AtomicPairOrWord32: { + ASSEMBLE_ATOMIC_PAIR_BINOP(orx); + break; + } + case kPPC_AtomicPairXorWord32: { + ASSEMBLE_ATOMIC_PAIR_BINOP(xor_); + break; + } + case kPPC_AtomicPairExchangeWord32: { + do { + Label exchange; + __ lwsync(); + __ bind(&exchange); + MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); + __ lwarx(i.OutputRegister(0), operand); + __ stwcx(i.InputRegister(2), operand); + __ addi(i.TempRegister(0), i.InputRegister(1), Operand(4)); + operand = MemOperand(i.InputRegister(0), i.TempRegister(0)); + __ lwarx(i.OutputRegister(1), operand); + __ stwcx(i.InputRegister(3), operand); + __ bne(&exchange, cr0); + __ sync(); + } while (false); + break; + } + case kPPC_AtomicPairCompareExchangeWord32: { + ASSEMBLE_ATOMIC_PAIR_COMPARE_EXCHANGE(cmpw); + break; + } +#endif default: - UNREACHABLE(); + UNREACHABLE(); } return kSuccess; } // NOLINT(readability/fn_size) @@ -2111,14 +2276,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Condition cond = FlagsConditionToCondition(condition, op); if (op == kPPC_CmpDouble) { - // check for unordered if necessary - if (cond == le) { + if (cond != ne) __ bunordered(flabel, cr); - // Unnecessary for eq/lt since only FU bit will be set. - } else if (cond == gt) { - __ bunordered(tlabel, cr); - // Unnecessary for ne/ge since only FU bit will be set. - } } __ b(cond, tlabel, cr); if (!branch->fallthru) __ b(flabel); // no fallthru to flabel. 
@@ -2204,13 +2363,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, CRegister cr = cr0; Condition cond = FlagsConditionToCondition(condition, op); if (op == kPPC_CmpDouble) { - // check for unordered if necessary - if (cond == le) { + if (cond != ne) { __ bunordered(&end, cr); - // Unnecessary for eq/lt since only FU bit will be set. - } else if (cond == gt) { + } else { __ bunordered(tlabel, cr); - // Unnecessary for ne/ge since only FU bit will be set. } } __ b(cond, tlabel, cr); @@ -2234,11 +2390,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, Condition cond = FlagsConditionToCondition(condition, op); if (op == kPPC_CmpDouble) { // check for unordered if necessary - if (cond == le) { + if (cond != ne) { reg_value = 0; __ li(reg, Operand::Zero()); __ bunordered(&done, cr); - } else if (cond == gt) { + } else { reg_value = 1; __ li(reg, Operand(1)); __ bunordered(&done, cr); diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h index f37529bd884eaf..2fb71a6f1824ed 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h +++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h @@ -118,13 +118,11 @@ namespace compiler { V(PPC_LoadWordU16) \ V(PPC_LoadWordS32) \ V(PPC_LoadWordU32) \ - V(PPC_LoadWord64) \ V(PPC_LoadFloat32) \ V(PPC_LoadDouble) \ V(PPC_StoreWord8) \ V(PPC_StoreWord16) \ V(PPC_StoreWord32) \ - V(PPC_StoreWord64) \ V(PPC_StoreFloat32) \ V(PPC_StoreDouble) \ V(PPC_ByteRev32) \ @@ -138,59 +136,54 @@ namespace compiler { V(PPC_AtomicStoreUint8) \ V(PPC_AtomicStoreUint16) \ V(PPC_AtomicStoreWord32) \ - V(PPC_AtomicStoreWord64) \ V(PPC_AtomicLoadUint8) \ V(PPC_AtomicLoadUint16) \ V(PPC_AtomicLoadWord32) \ - V(PPC_AtomicLoadWord64) \ V(PPC_AtomicExchangeUint8) \ V(PPC_AtomicExchangeUint16) \ V(PPC_AtomicExchangeWord32) \ - V(PPC_AtomicExchangeWord64) \ V(PPC_AtomicCompareExchangeUint8) \ V(PPC_AtomicCompareExchangeUint16) \ 
V(PPC_AtomicCompareExchangeWord32) \ - V(PPC_AtomicCompareExchangeWord64) \ V(PPC_AtomicAddUint8) \ V(PPC_AtomicAddUint16) \ V(PPC_AtomicAddUint32) \ - V(PPC_AtomicAddUint64) \ V(PPC_AtomicAddInt8) \ V(PPC_AtomicAddInt16) \ V(PPC_AtomicAddInt32) \ - V(PPC_AtomicAddInt64) \ V(PPC_AtomicSubUint8) \ V(PPC_AtomicSubUint16) \ V(PPC_AtomicSubUint32) \ - V(PPC_AtomicSubUint64) \ V(PPC_AtomicSubInt8) \ V(PPC_AtomicSubInt16) \ V(PPC_AtomicSubInt32) \ - V(PPC_AtomicSubInt64) \ V(PPC_AtomicAndUint8) \ V(PPC_AtomicAndUint16) \ V(PPC_AtomicAndUint32) \ - V(PPC_AtomicAndUint64) \ V(PPC_AtomicAndInt8) \ V(PPC_AtomicAndInt16) \ V(PPC_AtomicAndInt32) \ - V(PPC_AtomicAndInt64) \ V(PPC_AtomicOrUint8) \ V(PPC_AtomicOrUint16) \ V(PPC_AtomicOrUint32) \ - V(PPC_AtomicOrUint64) \ V(PPC_AtomicOrInt8) \ V(PPC_AtomicOrInt16) \ V(PPC_AtomicOrInt32) \ - V(PPC_AtomicOrInt64) \ V(PPC_AtomicXorUint8) \ V(PPC_AtomicXorUint16) \ V(PPC_AtomicXorUint32) \ - V(PPC_AtomicXorUint64) \ V(PPC_AtomicXorInt8) \ V(PPC_AtomicXorInt16) \ V(PPC_AtomicXorInt32) \ - V(PPC_AtomicXorInt64) + V(PPC_AtomicPairLoadWord32) \ + V(PPC_AtomicPairStoreWord32) \ + V(PPC_AtomicPairAddWord32) \ + V(PPC_AtomicPairSubWord32) \ + V(PPC_AtomicPairAndWord32) \ + V(PPC_AtomicPairOrWord32) \ + V(PPC_AtomicPairXorWord32) \ + V(PPC_AtomicPairExchangeWord32) \ + V(PPC_AtomicPairCompareExchangeWord32) // Addressing modes represent the "shape" of inputs to an instruction. // Many instructions support multiple addressing modes. 
Addressing modes diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc index 61c2d2be3bd5a7..8f2a98eb16dde5 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc @@ -124,80 +124,97 @@ int InstructionScheduler::GetTargetInstructionFlags( case kPPC_LoadWordU16: case kPPC_LoadWordS32: case kPPC_LoadWordU32: - case kPPC_LoadWord64: case kPPC_LoadFloat32: case kPPC_LoadDouble: case kPPC_AtomicLoadUint8: case kPPC_AtomicLoadUint16: case kPPC_AtomicLoadWord32: - case kPPC_AtomicLoadWord64: case kPPC_Peek: +#ifdef V8_TARGET_ARCH_PPC64 + case kPPC_LoadWord64: + case kPPC_AtomicLoadWord64: +#else + case kPPC_AtomicPairLoadWord32: +#endif return kIsLoadOperation; case kPPC_StoreWord8: case kPPC_StoreWord16: case kPPC_StoreWord32: - case kPPC_StoreWord64: case kPPC_StoreFloat32: case kPPC_StoreDouble: case kPPC_Push: case kPPC_PushFrame: case kPPC_StoreToStackSlot: case kPPC_Sync: +#ifdef V8_TARGET_ARCH_PPC64 + case kPPC_StoreWord64: +#endif return kHasSideEffect; case kPPC_AtomicStoreUint8: case kPPC_AtomicStoreUint16: case kPPC_AtomicStoreWord32: - case kPPC_AtomicStoreWord64: case kPPC_AtomicExchangeUint8: case kPPC_AtomicExchangeUint16: case kPPC_AtomicExchangeWord32: - case kPPC_AtomicExchangeWord64: case kPPC_AtomicCompareExchangeUint8: case kPPC_AtomicCompareExchangeUint16: case kPPC_AtomicCompareExchangeWord32: - case kPPC_AtomicCompareExchangeWord64: case kPPC_AtomicAddUint8: case kPPC_AtomicAddUint16: case kPPC_AtomicAddUint32: - case kPPC_AtomicAddUint64: case kPPC_AtomicAddInt8: case kPPC_AtomicAddInt16: case kPPC_AtomicAddInt32: - case kPPC_AtomicAddInt64: case kPPC_AtomicSubUint8: case kPPC_AtomicSubUint16: case kPPC_AtomicSubUint32: - case kPPC_AtomicSubUint64: case kPPC_AtomicSubInt8: case kPPC_AtomicSubInt16: case kPPC_AtomicSubInt32: - case kPPC_AtomicSubInt64: case kPPC_AtomicAndUint8: 
case kPPC_AtomicAndUint16: case kPPC_AtomicAndUint32: - case kPPC_AtomicAndUint64: case kPPC_AtomicAndInt8: case kPPC_AtomicAndInt16: case kPPC_AtomicAndInt32: - case kPPC_AtomicAndInt64: case kPPC_AtomicOrUint8: case kPPC_AtomicOrUint16: case kPPC_AtomicOrUint32: - case kPPC_AtomicOrUint64: case kPPC_AtomicOrInt8: case kPPC_AtomicOrInt16: case kPPC_AtomicOrInt32: - case kPPC_AtomicOrInt64: case kPPC_AtomicXorUint8: case kPPC_AtomicXorUint16: case kPPC_AtomicXorUint32: - case kPPC_AtomicXorUint64: case kPPC_AtomicXorInt8: case kPPC_AtomicXorInt16: case kPPC_AtomicXorInt32: +#ifdef V8_TARGET_ARCH_PPC64 + case kPPC_AtomicStoreWord64: + case kPPC_AtomicExchangeWord64: + case kPPC_AtomicCompareExchangeWord64: + case kPPC_AtomicAddUint64: + case kPPC_AtomicAddInt64: + case kPPC_AtomicSubUint64: + case kPPC_AtomicSubInt64: + case kPPC_AtomicAndUint64: + case kPPC_AtomicAndInt64: + case kPPC_AtomicOrUint64: + case kPPC_AtomicOrInt64: + case kPPC_AtomicXorUint64: case kPPC_AtomicXorInt64: +#else + case kPPC_AtomicPairStoreWord32: + case kPPC_AtomicPairAddWord32: + case kPPC_AtomicPairSubWord32: + case kPPC_AtomicPairAndWord32: + case kPPC_AtomicPairOrWord32: + case kPPC_AtomicPairXorWord32: + case kPPC_AtomicPairExchangeWord32: + case kPPC_AtomicPairCompareExchangeWord32: +#endif return kHasSideEffect; #define CASE(Name) case k##Name: diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index ef8490a7265398..eac9794dfd6e2b 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -174,6 +174,7 @@ void InstructionSelector::VisitLoad(Node* node) { Node* offset = node->InputAt(1); InstructionCode opcode = kArchNop; ImmediateMode mode = kInt16Imm; + switch (load_rep.representation()) { case MachineRepresentation::kFloat32: opcode = kPPC_LoadFloat32; @@ -188,9 +189,15 @@ void 
InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kWord16: opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16; break; +#if !V8_TARGET_ARCH_PPC64 + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. +#endif case MachineRepresentation::kWord32: opcode = kPPC_LoadWordU32; break; +#if V8_TARGET_ARCH_PPC64 case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. @@ -198,6 +205,9 @@ void InstructionSelector::VisitLoad(Node* node) { opcode = kPPC_LoadWord64; mode = kInt16Imm_4ByteAligned; break; +#else + case MachineRepresentation::kWord64: +#endif case MachineRepresentation::kCompressedSigned: // Fall through. case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: // Fall through. @@ -307,6 +317,7 @@ void InstructionSelector::VisitStore(Node* node) { case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. + mode = kInt16Imm_4ByteAligned; #endif case MachineRepresentation::kWord32: opcode = kPPC_StoreWord32; @@ -1473,13 +1484,11 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node, VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode); } -#if V8_TARGET_ARCH_PPC64 void VisitWord64Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont) { ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm); VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode); } -#endif // Shared routine for multiple float32 compare operations. 
void VisitFloat32Compare(InstructionSelector* selector, Node* node, @@ -1533,7 +1542,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, case IrOpcode::kUint32LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); return VisitWord32Compare(this, value, cont); -#if V8_TARGET_ARCH_PPC64 case IrOpcode::kWord64Equal: cont->OverwriteAndNegateIfEqual(kEqual); return VisitWord64Compare(this, value, cont); @@ -1549,7 +1557,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, case IrOpcode::kUint64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); return VisitWord64Compare(this, value, cont); -#endif case IrOpcode::kFloat32Equal: cont->OverwriteAndNegateIfEqual(kEqual); return VisitFloat32Compare(this, value, cont); @@ -1622,7 +1629,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, // case IrOpcode::kWord32Shl: // case IrOpcode::kWord32Shr: // case IrOpcode::kWord32Ror: -#if V8_TARGET_ARCH_PPC64 case IrOpcode::kInt64Sub: return VisitWord64Compare(this, value, cont); case IrOpcode::kWord64And: @@ -1637,7 +1643,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, // case IrOpcode::kWord64Shl: // case IrOpcode::kWord64Shr: // case IrOpcode::kWord64Ror: -#endif case IrOpcode::kStackPointerGreaterThan: cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); return VisitStackPointerGreaterThan(value, cont); @@ -1710,7 +1715,6 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { VisitWord32Compare(this, node, &cont); } -#if V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64Equal(Node* const node) { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); VisitWord64Compare(this, node, &cont); @@ -1737,7 +1741,6 @@ void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); VisitWord64Compare(this, node, &cont); } -#endif void 
InstructionSelector::VisitInt32MulWithOverflow(Node* node) { if (Node* ovf = NodeProperties::FindProjection(node, 1)) { @@ -1862,15 +1865,19 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) { void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); } +#ifdef V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); } +#endif void InstructionSelector::VisitWord32AtomicStore(Node* node) { VisitStore(node); } +#ifdef V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64AtomicStore(Node* node) { VisitStore(node); } +#endif void VisitAtomicExchange(InstructionSelector* selector, Node* node, ArchOpcode opcode) { @@ -1911,6 +1918,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) { VisitAtomicExchange(this, node, opcode); } +#ifdef V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64AtomicExchange(Node* node) { ArchOpcode opcode = kArchNop; MachineType type = AtomicOpType(node->op()); @@ -1928,6 +1936,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) { } VisitAtomicExchange(this, node, opcode); } +#endif void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, ArchOpcode opcode) { @@ -1974,6 +1983,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { VisitAtomicCompareExchange(this, node, opcode); } +#ifdef V8_TARGET_ARCH_PPC64 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { MachineType type = AtomicOpType(node->op()); ArchOpcode opcode = kArchNop; @@ -1991,12 +2001,17 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { } VisitAtomicCompareExchange(this, node, opcode); } +#endif void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op, - ArchOpcode int32_op, ArchOpcode uint32_op, - ArchOpcode int64_op, ArchOpcode uint64_op) { + ArchOpcode int32_op, ArchOpcode 
uint32_op +#if V8_TARGET_ARCH_PPC64 + , + ArchOpcode int64_op, ArchOpcode uint64_op +#endif +) { PPCOperandGenerator g(selector); Node* base = node->InputAt(0); Node* index = node->InputAt(1); @@ -2017,10 +2032,12 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node, opcode = int32_op; } else if (type == MachineType::Uint32()) { opcode = uint32_op; +#if V8_TARGET_ARCH_PPC64 } else if (type == MachineType::Int64()) { opcode = int64_op; } else if (type == MachineType::Uint64()) { opcode = uint64_op; +#endif } else { UNREACHABLE(); return; @@ -2056,14 +2073,33 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation( UNREACHABLE(); } -#define VISIT_ATOMIC_BINOP(op) \ +#if V8_TARGET_ARCH_PPC64 +#define VISIT_ATOMIC_BINOP32(op) \ + void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ + VisitAtomicBinaryOperation( \ + this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \ + kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \ + kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \ + kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \ + } +#else +#define VISIT_ATOMIC_BINOP32(op) \ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ VisitAtomicBinaryOperation( \ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \ kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \ - kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \ - kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \ - } \ + kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32); \ + } +#endif + VISIT_ATOMIC_BINOP32(Add) + VISIT_ATOMIC_BINOP32(Sub) + VISIT_ATOMIC_BINOP32(And) + VISIT_ATOMIC_BINOP32(Or) + VISIT_ATOMIC_BINOP32(Xor) +#undef VISIT_ATOMIC_BINOP32 + +#ifdef V8_TARGET_ARCH_PPC64 +#define VISIT_ATOMIC_BINOP64(op) \ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ VisitAtomicBinaryOperation( \ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \ @@ -2071,16 +2107,17 @@ 
kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \ kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \ } -VISIT_ATOMIC_BINOP(Add) -VISIT_ATOMIC_BINOP(Sub) -VISIT_ATOMIC_BINOP(And) -VISIT_ATOMIC_BINOP(Or) -VISIT_ATOMIC_BINOP(Xor) -#undef VISIT_ATOMIC_BINOP - -void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { - UNREACHABLE(); -} + VISIT_ATOMIC_BINOP64(Add) + VISIT_ATOMIC_BINOP64(Sub) + VISIT_ATOMIC_BINOP64(And) + VISIT_ATOMIC_BINOP64(Or) + VISIT_ATOMIC_BINOP64(Xor) +#undef VISIT_ATOMIC_BINOP64 +#endif + + void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { + UNREACHABLE(); + } void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { UNREACHABLE(); @@ -2393,6 +2430,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); } // static MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { +#if V8_TARGET_ARCH_PPC64 return MachineOperatorBuilder::kFloat32RoundDown | MachineOperatorBuilder::kFloat64RoundDown | MachineOperatorBuilder::kFloat32RoundUp | @@ -2403,6 +2441,11 @@ InstructionSelector::SupportedMachineOperatorFlags() { MachineOperatorBuilder::kWord32Popcnt | MachineOperatorBuilder::kWord64Popcnt; // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F. 
+#else +return MachineOperatorBuilder::kNoFlags; + //return MachineOperatorBuilder::kWord32Popcnt | + // MachineOperatorBuilder::kWord64Popcnt; +#endif } // static @@ -2412,6 +2455,122 @@ InstructionSelector::AlignmentRequirements() { FullUnalignedAccessSupport(); } +#if V8_TARGET_ARCH_PPC +void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { + PPCOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* offset = node->InputAt(1); + InstructionCode opcode = kPPC_AtomicPairLoadWord32; + + if (node->opcode() == IrOpcode::kPoisonedLoad && + poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { + opcode |= MiscField::encode(kMemoryAccessPoisoned); + } + + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + + InstructionOperand inputs[] = {g.UseUniqueRegister(base), + g.UseUniqueRegister(offset)}; + + InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r3), + g.DefineAsFixed(projection1, r4)}; + + InstructionOperand temps[] = {g.TempRegister()}; + + Emit(opcode, arraysize(outputs), outputs, arraysize(inputs), inputs, + arraysize(temps), temps); +} + +void InstructionSelector::VisitWord32AtomicPairStore(Node* node) { + PPCOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* offset = node->InputAt(1); + Node* value_low = node->InputAt(2); + Node* value_high = node->InputAt(3); + + InstructionCode opcode = kPPC_AtomicPairStoreWord32; + + InstructionOperand inputs[] = { + g.UseUniqueRegister(base), g.UseUniqueRegister(offset), + g.UseUniqueRegister(value_low), g.UseUniqueRegister(value_high)}; + + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + + Emit(opcode, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps); +} + +void VisitWord32AtomicPairBinOp(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + PPCOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* offset = node->InputAt(1); + 
Node* value_low = node->InputAt(2); + Node* value_high = node->InputAt(3); + + InstructionCode code = opcode; + + InstructionOperand inputs[] = { + g.UseFixed(value_low, r3), g.UseFixed(value_high, r4), + g.UseUniqueRegister(base), g.UseUniqueRegister(offset)}; + + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + + InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r3), + g.DefineAsFixed(projection1, r4)}; + + InstructionOperand temps[] = {g.TempRegister()}; + + selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, + arraysize(temps), temps); +} + +void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairAddWord32); +} + +void InstructionSelector::VisitWord32AtomicPairSub(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairSubWord32); +} + +void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairAndWord32); +} + +void InstructionSelector::VisitWord32AtomicPairOr(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairOrWord32); +} + +void InstructionSelector::VisitWord32AtomicPairXor(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairXorWord32); +} + +void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { + VisitWord32AtomicPairBinOp(this, node, kPPC_AtomicPairExchangeWord32); +} + +void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { + PPCOperandGenerator g(this); + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[] = {g.UseFixed(node->InputAt(2), r5), + g.UseFixed(node->InputAt(3), r6), + g.UseFixed(node->InputAt(4), r7), + g.UseFixed(node->InputAt(5), r8), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1))}; + InstructionCode code = kPPC_AtomicPairCompareExchangeWord32 | + 
AddressingModeField::encode(addressing_mode); + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r3), + g.DefineAsFixed(projection1, r4)}; + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, + arraysize(temps), temps); +} +#endif + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.cc b/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.cc new file mode 100644 index 00000000000000..b387b1076a8adb --- /dev/null +++ b/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.cc @@ -0,0 +1,105 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/backend/ppc/unwinding-info-writer-ppc.h" +#include "src/compiler/backend/instruction.h" + +namespace v8 { +namespace internal { +namespace compiler { +void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset, + const InstructionBlock* block) { + if (!enabled()) return; + + block_will_exit_ = false; + + DCHECK_LT(block->rpo_number().ToInt(), + static_cast(block_initial_states_.size())); + const BlockInitialState* initial_state = + block_initial_states_[block->rpo_number().ToInt()]; + if (!initial_state) return; + if (initial_state->saved_lr_ != saved_lr_) { + eh_frame_writer_.AdvanceLocation(pc_offset); + if (initial_state->saved_lr_) { + eh_frame_writer_.RecordRegisterSavedToStack(kLrDwarfCode, + kSystemPointerSize); + eh_frame_writer_.RecordRegisterSavedToStack(fp, 0); + } else { + eh_frame_writer_.RecordRegisterFollowsInitialRule(kLrDwarfCode); + } + saved_lr_ = initial_state->saved_lr_; + } +} + +void UnwindingInfoWriter::EndInstructionBlock(const 
InstructionBlock* block) { + if (!enabled() || block_will_exit_) return; + + for (const RpoNumber& successor : block->successors()) { + int successor_index = successor.ToInt(); + DCHECK_LT(successor_index, static_cast(block_initial_states_.size())); + const BlockInitialState* existing_state = + block_initial_states_[successor_index]; + + // If we already had an entry for this BB, check that the values are the + // same we are trying to insert. + if (existing_state) { + DCHECK_EQ(existing_state->saved_lr_, saved_lr_); + } else { + block_initial_states_[successor_index] = + new (zone_) BlockInitialState(saved_lr_); + } + } +} + +void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) { + if (!enabled()) return; + + // Regardless of the type of frame constructed, the relevant part of the + // layout is always the one in the diagram: + // + // | .... | higher addresses + // +----------+ ^ + // | LR | | | + // +----------+ | | + // | saved FP | | | + // +----------+ <-- FP v + // | .... | stack growth + // + // The LR is pushed on the stack, and we can record this fact at the end of + // the construction, since the LR itself is not modified in the process. + eh_frame_writer_.AdvanceLocation(at_pc); + eh_frame_writer_.RecordRegisterSavedToStack(kLrDwarfCode, + kSystemPointerSize); + eh_frame_writer_.RecordRegisterSavedToStack(fp, 0); + saved_lr_ = true; +} + +void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) { + if (!enabled()) return; + + // The lr is restored by the last operation in LeaveFrame(). 
+ eh_frame_writer_.AdvanceLocation(at_pc); + eh_frame_writer_.RecordRegisterFollowsInitialRule(kLrDwarfCode); + saved_lr_ = false; +} + +void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset) { + if (!enabled()) return; + + eh_frame_writer_.AdvanceLocation(pc_offset); + eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0); + eh_frame_writer_.RecordRegisterSavedToStack(kLrDwarfCode, 0); +} + +void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) { + if (!enabled()) return; + + eh_frame_writer_.AdvanceLocation(pc_offset); + eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0); + eh_frame_writer_.RecordRegisterFollowsInitialRule(kLrDwarfCode); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.h b/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.h new file mode 100644 index 00000000000000..e96a48308fb609 --- /dev/null +++ b/deps/v8/src/compiler/backend/ppc/unwinding-info-writer-ppc.h @@ -0,0 +1,73 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_BACKEND_PPC_UNWINDING_INFO_WRITER_PPC_H_ +#define V8_COMPILER_BACKEND_PPC_UNWINDING_INFO_WRITER_PPC_H_ + +#include "src/diagnostics/eh-frame.h" +#include "src/flags/flags.h" + +namespace v8 { +namespace internal { +namespace compiler { + +class InstructionBlock; + +class UnwindingInfoWriter { + public: + explicit UnwindingInfoWriter(Zone* zone) + : zone_(zone), + eh_frame_writer_(zone), + saved_lr_(false), + block_will_exit_(false), + block_initial_states_(zone) { + if (enabled()) eh_frame_writer_.Initialize(); + } + + void SetNumberOfInstructionBlocks(int number) { + if (enabled()) block_initial_states_.resize(number); + } + + void BeginInstructionBlock(int pc_offset, const InstructionBlock* block); + void EndInstructionBlock(const InstructionBlock* block); + + void MarkLinkRegisterOnTopOfStack(int pc_offset); + void MarkPopLinkRegisterFromTopOfStack(int pc_offset); + + void MarkFrameConstructed(int at_pc); + void MarkFrameDeconstructed(int at_pc); + + void MarkBlockWillExit() { block_will_exit_ = true; } + + void Finish(int code_size) { + if (enabled()) eh_frame_writer_.Finish(code_size); + } + + EhFrameWriter* eh_frame_writer() { + return enabled() ? 
&eh_frame_writer_ : nullptr; + } + + private: + bool enabled() const { return FLAG_perf_prof_unwinding_info; } + + class BlockInitialState : public ZoneObject { + public: + explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {} + + bool saved_lr_; + }; + + Zone* zone_; + EhFrameWriter eh_frame_writer_; + bool saved_lr_; + bool block_will_exit_; + + ZoneVector block_initial_states_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_BACKEND_PPC_UNWINDING_INFO_WRITER_PPC_H_ diff --git a/deps/v8/src/compiler/backend/ppc64/instruction-codes-ppc64.h b/deps/v8/src/compiler/backend/ppc64/instruction-codes-ppc64.h new file mode 100644 index 00000000000000..87a330599ed9fc --- /dev/null +++ b/deps/v8/src/compiler/backend/ppc64/instruction-codes-ppc64.h @@ -0,0 +1,213 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_BACKEND_PPC64_INSTRUCTION_CODES_PPC64_H_ +#define V8_COMPILER_BACKEND_PPC64_INSTRUCTION_CODES_PPC64_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// PPC-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. 
+#define TARGET_ARCH_OPCODE_LIST(V) \ + V(PPC_Peek) \ + V(PPC_Sync) \ + V(PPC_And) \ + V(PPC_AndComplement) \ + V(PPC_Or) \ + V(PPC_OrComplement) \ + V(PPC_Xor) \ + V(PPC_ShiftLeft32) \ + V(PPC_ShiftLeft64) \ + V(PPC_ShiftLeftPair) \ + V(PPC_ShiftRight32) \ + V(PPC_ShiftRight64) \ + V(PPC_ShiftRightPair) \ + V(PPC_ShiftRightAlg32) \ + V(PPC_ShiftRightAlg64) \ + V(PPC_ShiftRightAlgPair) \ + V(PPC_RotRight32) \ + V(PPC_RotRight64) \ + V(PPC_Not) \ + V(PPC_RotLeftAndMask32) \ + V(PPC_RotLeftAndClear64) \ + V(PPC_RotLeftAndClearLeft64) \ + V(PPC_RotLeftAndClearRight64) \ + V(PPC_Add32) \ + V(PPC_Add64) \ + V(PPC_AddWithOverflow32) \ + V(PPC_AddPair) \ + V(PPC_AddDouble) \ + V(PPC_Sub) \ + V(PPC_SubWithOverflow32) \ + V(PPC_SubPair) \ + V(PPC_SubDouble) \ + V(PPC_Mul32) \ + V(PPC_Mul32WithHigh32) \ + V(PPC_Mul64) \ + V(PPC_MulHigh32) \ + V(PPC_MulHighU32) \ + V(PPC_MulPair) \ + V(PPC_MulDouble) \ + V(PPC_Div32) \ + V(PPC_Div64) \ + V(PPC_DivU32) \ + V(PPC_DivU64) \ + V(PPC_DivDouble) \ + V(PPC_Mod32) \ + V(PPC_Mod64) \ + V(PPC_ModU32) \ + V(PPC_ModU64) \ + V(PPC_ModDouble) \ + V(PPC_Neg) \ + V(PPC_NegDouble) \ + V(PPC_SqrtDouble) \ + V(PPC_FloorDouble) \ + V(PPC_CeilDouble) \ + V(PPC_TruncateDouble) \ + V(PPC_RoundDouble) \ + V(PPC_MaxDouble) \ + V(PPC_MinDouble) \ + V(PPC_AbsDouble) \ + V(PPC_Cntlz32) \ + V(PPC_Cntlz64) \ + V(PPC_Popcnt32) \ + V(PPC_Popcnt64) \ + V(PPC_Cmp32) \ + V(PPC_Cmp64) \ + V(PPC_CmpDouble) \ + V(PPC_Tst32) \ + V(PPC_Tst64) \ + V(PPC_Push) \ + V(PPC_PushFrame) \ + V(PPC_StoreToStackSlot) \ + V(PPC_ExtendSignWord8) \ + V(PPC_ExtendSignWord16) \ + V(PPC_ExtendSignWord32) \ + V(PPC_Uint32ToUint64) \ + V(PPC_Int64ToInt32) \ + V(PPC_Int64ToFloat32) \ + V(PPC_Int64ToDouble) \ + V(PPC_Uint64ToFloat32) \ + V(PPC_Uint64ToDouble) \ + V(PPC_Int32ToFloat32) \ + V(PPC_Int32ToDouble) \ + V(PPC_Uint32ToFloat32) \ + V(PPC_Uint32ToDouble) \ + V(PPC_Float32ToDouble) \ + V(PPC_Float64SilenceNaN) \ + V(PPC_DoubleToInt32) \ + V(PPC_DoubleToUint32) \ + 
V(PPC_DoubleToInt64) \ + V(PPC_DoubleToUint64) \ + V(PPC_DoubleToFloat32) \ + V(PPC_DoubleExtractLowWord32) \ + V(PPC_DoubleExtractHighWord32) \ + V(PPC_DoubleInsertLowWord32) \ + V(PPC_DoubleInsertHighWord32) \ + V(PPC_DoubleConstruct) \ + V(PPC_BitcastInt32ToFloat32) \ + V(PPC_BitcastFloat32ToInt32) \ + V(PPC_BitcastInt64ToDouble) \ + V(PPC_BitcastDoubleToInt64) \ + V(PPC_LoadWordS8) \ + V(PPC_LoadWordU8) \ + V(PPC_LoadWordS16) \ + V(PPC_LoadWordU16) \ + V(PPC_LoadWordS32) \ + V(PPC_LoadWordU32) \ + V(PPC_LoadWord64) \ + V(PPC_LoadFloat32) \ + V(PPC_LoadDouble) \ + V(PPC_StoreWord8) \ + V(PPC_StoreWord16) \ + V(PPC_StoreWord32) \ + V(PPC_StoreWord64) \ + V(PPC_StoreFloat32) \ + V(PPC_StoreDouble) \ + V(PPC_ByteRev32) \ + V(PPC_ByteRev64) \ + V(PPC_CompressSigned) \ + V(PPC_CompressPointer) \ + V(PPC_CompressAny) \ + V(PPC_AtomicStoreUint8) \ + V(PPC_AtomicStoreUint16) \ + V(PPC_AtomicStoreWord32) \ + V(PPC_AtomicStoreWord64) \ + V(PPC_AtomicLoadUint8) \ + V(PPC_AtomicLoadUint16) \ + V(PPC_AtomicLoadWord32) \ + V(PPC_AtomicLoadWord64) \ + V(PPC_AtomicExchangeUint8) \ + V(PPC_AtomicExchangeUint16) \ + V(PPC_AtomicExchangeWord32) \ + V(PPC_AtomicExchangeWord64) \ + V(PPC_AtomicCompareExchangeUint8) \ + V(PPC_AtomicCompareExchangeUint16) \ + V(PPC_AtomicCompareExchangeWord32) \ + V(PPC_AtomicCompareExchangeWord64) \ + V(PPC_AtomicAddUint8) \ + V(PPC_AtomicAddUint16) \ + V(PPC_AtomicAddUint32) \ + V(PPC_AtomicAddUint64) \ + V(PPC_AtomicAddInt8) \ + V(PPC_AtomicAddInt16) \ + V(PPC_AtomicAddInt32) \ + V(PPC_AtomicAddInt64) \ + V(PPC_AtomicSubUint8) \ + V(PPC_AtomicSubUint16) \ + V(PPC_AtomicSubUint32) \ + V(PPC_AtomicSubUint64) \ + V(PPC_AtomicSubInt8) \ + V(PPC_AtomicSubInt16) \ + V(PPC_AtomicSubInt32) \ + V(PPC_AtomicSubInt64) \ + V(PPC_AtomicAndUint8) \ + V(PPC_AtomicAndUint16) \ + V(PPC_AtomicAndUint32) \ + V(PPC_AtomicAndUint64) \ + V(PPC_AtomicAndInt8) \ + V(PPC_AtomicAndInt16) \ + V(PPC_AtomicAndInt32) \ + V(PPC_AtomicAndInt64) \ + V(PPC_AtomicOrUint8) \ + 
V(PPC_AtomicOrUint16) \ + V(PPC_AtomicOrUint32) \ + V(PPC_AtomicOrUint64) \ + V(PPC_AtomicOrInt8) \ + V(PPC_AtomicOrInt16) \ + V(PPC_AtomicOrInt32) \ + V(PPC_AtomicOrInt64) \ + V(PPC_AtomicXorUint8) \ + V(PPC_AtomicXorUint16) \ + V(PPC_AtomicXorUint32) \ + V(PPC_AtomicXorUint64) \ + V(PPC_AtomicXorInt8) \ + V(PPC_AtomicXorInt16) \ + V(PPC_AtomicXorInt32) \ + V(PPC_AtomicXorInt64) + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. +// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MRI = [register + immediate] +// MRR = [register + register] +#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MRI) /* [%r0 + K] */ \ + V(MRR) /* [%r0 + %r1] */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_BACKEND_PPC64_INSTRUCTION_CODES_PPC64_H_ diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h index d3a52b34b7712a..266e885afad662 100644 --- a/deps/v8/src/compiler/backend/unwinding-info-writer.h +++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h @@ -13,6 +13,8 @@ #include "src/compiler/backend/arm64/unwinding-info-writer-arm64.h" #elif V8_TARGET_ARCH_X64 #include "src/compiler/backend/x64/unwinding-info-writer-x64.h" +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +#include "src/compiler/backend/ppc/unwinding-info-writer-ppc.h" #else // Placeholder for unsupported architectures. 
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc index 428ba058a7f904..25f67056e81ed6 100644 --- a/deps/v8/src/compiler/c-linkage.cc +++ b/deps/v8/src/compiler/c-linkage.cc @@ -99,7 +99,7 @@ namespace { #define CALLEE_SAVE_FP_REGISTERS \ f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit() -#elif V8_TARGET_ARCH_PPC64 +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 // =========================================================================== // == ppc & ppc64 ============================================================ // =========================================================================== diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc index e18702359e1335..137aca46d850e1 100644 --- a/deps/v8/src/debug/debug-evaluate.cc +++ b/deps/v8/src/debug/debug-evaluate.cc @@ -1049,7 +1049,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) { } } CHECK(!failed); -#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_MIPS64) +#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ + defined(V8_TARGET_ARCH_MIPS64) // Isolate-independent builtin calls and jumps do not emit reloc infos // on PPC. We try to avoid using PC relative code due to performance // issue with especially older hardwares. diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc index a5b41c46fe842a..b8f150b48461c5 100644 --- a/deps/v8/src/debug/ppc/debug-ppc.cc +++ b/deps/v8/src/debug/ppc/debug-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/debug/debug.h" @@ -52,4 +52,4 @@ const bool LiveEdit::kFrameDropperSupported = true; } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/diagnostics/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc index 45d693a4764a9b..be3e0ee68b9733 100644 --- a/deps/v8/src/diagnostics/eh-frame.cc +++ b/deps/v8/src/diagnostics/eh-frame.cc @@ -10,7 +10,8 @@ #include "src/codegen/code-desc.h" #if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM) && \ - !defined(V8_TARGET_ARCH_ARM64) + !defined(V8_TARGET_ARCH_ARM64) && \ + !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) // Placeholders for unsupported architectures. @@ -320,6 +321,7 @@ void EhFrameWriter::RecordRegisterSavedToStack(int register_code, int offset) { } } +#if !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 void EhFrameWriter::RecordRegisterNotModified(Register name) { DCHECK_EQ(writer_state_, InternalState::kInitialized); WriteOpcode(EhFrameConstants::DwarfOpcodes::kSameValue); @@ -334,6 +336,7 @@ void EhFrameWriter::RecordRegisterFollowsInitialRule(Register name) { << EhFrameConstants::kFollowInitialRuleMaskSize) | (code & EhFrameConstants::kFollowInitialRuleMask)); } +#endif void EhFrameWriter::Finish(int code_size) { DCHECK_EQ(writer_state_, InternalState::kInitialized); diff --git a/deps/v8/src/diagnostics/eh-frame.h b/deps/v8/src/diagnostics/eh-frame.h index a9d76a27436d76..fb9ceee6e2f8e1 100644 --- a/deps/v8/src/diagnostics/eh-frame.h +++ b/deps/v8/src/diagnostics/eh-frame.h @@ -102,6 +102,11 @@ class V8_EXPORT_PRIVATE EhFrameWriter { RecordRegisterSavedToStack(RegisterToDwarfCode(name), offset); } +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +void RecordRegisterNotModified(const int & code); +void RecordRegisterFollowsInitialRule(const int & code); +#endif + // The register has not been modified from the 
previous frame. void RecordRegisterNotModified(Register name); @@ -168,11 +173,12 @@ class V8_EXPORT_PRIVATE EhFrameWriter { // Write nops until the size reaches a multiple of 8 bytes. void WritePaddingToAlignedSize(int unpadded_size); - +public: // Internal version that directly accepts a DWARF register code, needed for // handling pseudo-registers on some platforms. void RecordRegisterSavedToStack(int register_code, int offset); +private: int GetProcedureAddressOffset() const { return fde_offset() + EhFrameConstants::kProcedureAddressOffsetInFde; } diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h index 36ab844110038d..120d68f30063b3 100644 --- a/deps/v8/src/diagnostics/perf-jit.h +++ b/deps/v8/src/diagnostics/perf-jit.h @@ -79,6 +79,7 @@ class PerfJitLogger : public CodeEventLogger { static const uint32_t kElfMachARM = 40; static const uint32_t kElfMachMIPS = 10; static const uint32_t kElfMachARM64 = 183; + static const uint32_t kElfMachPPC = 20; uint32_t GetElfMach() { #if V8_TARGET_ARCH_IA32 @@ -91,6 +92,8 @@ class PerfJitLogger : public CodeEventLogger { return kElfMachMIPS; #elif V8_TARGET_ARCH_ARM64 return kElfMachARM64; +#elif V8_TARGET_ARCH_PPC + return kElfMachPPC; #else UNIMPLEMENTED(); return 0; diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc index e7d26858e5b000..99767f17dc4554 100644 --- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc +++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc @@ -27,7 +27,7 @@ #include #include -#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/base/platform/platform.h" #include "src/codegen/macro-assembler.h" @@ -1519,4 +1519,4 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end, } // namespace disasm -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc new file mode 
100644 index 00000000000000..a29f88fa4cd440 --- /dev/null +++ b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc @@ -0,0 +1,85 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/ppc/constants-ppc.h" +#include "src/diagnostics/eh-frame.h" + +namespace v8 { +namespace internal { + +const int EhFrameConstants::kCodeAlignmentFactor = 4; +// all PPC are 4 bytes instruction +#ifdef V8_TARGET_ARCH_PPC64 +const int EhFrameConstants::kDataAlignmentFactor = -8; // 64-bit always -8 +#else +const int EhFrameConstants::kDataAlignmentFactor = -4; +#endif + +void EhFrameWriter::RecordRegisterNotModified(const int & code) { + DCHECK_EQ(writer_state_, InternalState::kInitialized); + WriteOpcode(EhFrameConstants::DwarfOpcodes::kSameValue); + WriteULeb128(code); +} + +void EhFrameWriter::RecordRegisterNotModified(Register name) { + int code = RegisterToDwarfCode(name); + return RecordRegisterNotModified(code); +} + +void EhFrameWriter::RecordRegisterFollowsInitialRule(const int & code) { + DCHECK_EQ(writer_state_, InternalState::kInitialized); + DCHECK_LE(code, EhFrameConstants::kFollowInitialRuleMask); + WriteByte((EhFrameConstants::kFollowInitialRuleTag + << EhFrameConstants::kFollowInitialRuleMaskSize) | + (code & EhFrameConstants::kFollowInitialRuleMask)); +} + +void EhFrameWriter::RecordRegisterFollowsInitialRule(Register name) { + int code = RegisterToDwarfCode(name); + return RecordRegisterFollowsInitialRule(code); +} + +void EhFrameWriter::WriteReturnAddressRegisterCode() { + WriteULeb128(kLrDwarfCode); +} + +void EhFrameWriter::WriteInitialStateInCie() { + SetBaseAddressRegisterAndOffset(fp, 0); + RecordRegisterNotModified(kLrDwarfCode); +} + +// static +int EhFrameWriter::RegisterToDwarfCode(Register name) { + switch (name.code()) { + case kRegCode_fp: + return kFpDwarfCode; + case kRegCode_sp: + return kSpDwarfCode; + case 
kRegCode_r0: + return kR0DwarfCode; + default: + UNIMPLEMENTED(); + return -1; + } +} + +#ifdef ENABLE_DISASSEMBLER + +// static +const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) { + switch (code) { + case kFpDwarfCode: + return "fp"; + case kSpDwarfCode: + return "sp"; + default: + UNIMPLEMENTED(); + return nullptr; + } +} + +#endif + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h index a6e5c9522cc350..56e29311119778 100644 --- a/deps/v8/src/execution/frame-constants.h +++ b/deps/v8/src/execution/frame-constants.h @@ -365,7 +365,7 @@ inline static int FrameSlotToFPOffset(int slot) { #include "src/execution/arm64/frame-constants-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/execution/arm/frame-constants-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/execution/ppc/frame-constants-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/execution/mips/frame-constants-mips.h" // NOLINT diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.cc b/deps/v8/src/execution/ppc/frame-constants-ppc.cc index 05cde9c8eef854..97bef56a56dffe 100644 --- a/deps/v8/src/execution/ppc/frame-constants-ppc.cc +++ b/deps/v8/src/execution/ppc/frame-constants-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/execution/ppc/frame-constants-ppc.h" @@ -32,4 +32,4 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc index ab8786713b196d..1b5e582856fbf1 100644 --- a/deps/v8/src/execution/ppc/simulator-ppc.cc +++ b/deps/v8/src/execution/ppc/simulator-ppc.cc @@ -922,8 +922,18 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right, } static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) { +#if V8_HOST_ARCH_64_BIT *x = static_cast(pair->x); *y = static_cast(pair->y); +#else +#if V8_TARGET_BIG_ENDIAN + *x = static_cast(*pair >> 32); + *y = static_cast(*pair); +#else + *x = static_cast(*pair); + *y = static_cast(*pair >> 32); +#endif +#endif } // Calls into the V8 runtime. 
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h index 6eca3f2b47e95b..58aa753a334d9e 100644 --- a/deps/v8/src/execution/simulator-base.h +++ b/deps/v8/src/execution/simulator-base.h @@ -121,6 +121,7 @@ class SimulatorBase { // - V8_TARGET_ARCH_MIPS: swi (software-interrupt) // - V8_TARGET_ARCH_MIPS64: swi (software-interrupt) // - V8_TARGET_ARCH_PPC: svc (Supervisor Call) +// - V8_TARGET_ARCH_PPC64: svc (Supervisor Call) // - V8_TARGET_ARCH_S390: svc (Supervisor Call) class Redirection { public: diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h index 58b173694fc441..9c7e35e19d9abb 100644 --- a/deps/v8/src/execution/simulator.h +++ b/deps/v8/src/execution/simulator.h @@ -18,7 +18,7 @@ #include "src/execution/arm64/simulator-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/execution/arm/simulator-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/execution/ppc/simulator-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/execution/mips/simulator-mips.h" diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc index f01821b5651f4b..f3f0211a647d8a 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.cc +++ b/deps/v8/src/interpreter/interpreter-assembler.cc @@ -1608,7 +1608,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() { #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 return false; #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ + V8_TARGET_ARCH_PPC64 return true; #else #error "Unknown Architecture" diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc index e445dfc65a7087..03a9f6034d0bff 100644 --- a/deps/v8/src/libsampler/sampler.cc +++ b/deps/v8/src/libsampler/sampler.cc @@ 
-372,7 +372,9 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, void SignalHandler::FillRegisterState(void* context, RegisterState* state) { // Extracting the sample from the context is extremely machine dependent. ucontext_t* ucontext = reinterpret_cast(context); -#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) +#if !(V8_OS_OPENBSD || \ + (V8_OS_LINUX && \ + (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390 || V8_HOST_ARCH_PPC64))) mcontext_t& mcontext = ucontext->uc_mcontext; #endif #if V8_OS_LINUX @@ -413,7 +415,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { state->pc = reinterpret_cast(mcontext.pc); state->sp = reinterpret_cast(mcontext.gregs[29]); state->fp = reinterpret_cast(mcontext.gregs[30]); -#elif V8_HOST_ARCH_PPC +#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 #if V8_LIBC_GLIBC state->pc = reinterpret_cast(ucontext->uc_mcontext.regs->nip); state->sp = diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc index 9b86a16031e84f..6ddd609dc31929 100644 --- a/deps/v8/src/logging/log.cc +++ b/deps/v8/src/logging/log.cc @@ -559,6 +559,8 @@ void LowLevelLogger::LogCodeInfo() { const char arch[] = "arm"; #elif V8_TARGET_ARCH_PPC const char arch[] = "ppc"; +#elif V8_TARGET_ARCH_PPC64 + const char arch[] = "ppc64"; #elif V8_TARGET_ARCH_MIPS const char arch[] = "mips"; #elif V8_TARGET_ARCH_ARM64 diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h index 6a5ac9f31a8e5b..64abf9cc2d31fa 100644 --- a/deps/v8/src/objects/code.h +++ b/deps/v8/src/objects/code.h @@ -415,6 +415,9 @@ class Code : public HeapObject { static constexpr int kHeaderPaddingSize = 20; #elif V8_TARGET_ARCH_MIPS static constexpr int kHeaderPaddingSize = 20; +#elif V8_TARGET_ARCH_PPC + static constexpr int kHeaderPaddingSize = + FLAG_enable_embedded_constant_pool ? 16 : 0; #elif V8_TARGET_ARCH_PPC64 static constexpr int kHeaderPaddingSize = FLAG_enable_embedded_constant_pool ? 
28 : 0; diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc index 4963b642c65dfb..edabb92fa39b26 100644 --- a/deps/v8/src/profiler/tick-sample.cc +++ b/deps/v8/src/profiler/tick-sample.cc @@ -110,7 +110,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, } state->sp = reinterpret_cast(simulator->get_register(Simulator::sp)); state->fp = reinterpret_cast(simulator->get_register(Simulator::fp)); -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 if (!simulator->has_bad_pc()) { state->pc = reinterpret_cast(simulator->get_pc()); } diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc index 335fe87e2f0b5f..0746fb6e879d0b 100644 --- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc +++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#if V8_TARGET_ARCH_PPC +#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/regexp/ppc/regexp-macro-assembler-ppc.h" @@ -1353,4 +1353,4 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset, } // namespace internal } // namespace v8 -#endif // V8_TARGET_ARCH_PPC +#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h index 2dc6739e421201..8ec12a0ae62390 100644 --- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h +++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h @@ -15,7 +15,7 @@ #include "src/regexp/arm64/regexp-macro-assembler-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/regexp/arm/regexp-macro-assembler-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/regexp/ppc/regexp-macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/regexp/mips/regexp-macro-assembler-mips.h" diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc index e0bc4b8e32347a..9e8cce1b3bc1f0 100644 --- a/deps/v8/src/regexp/regexp.cc +++ b/deps/v8/src/regexp/regexp.cc @@ -810,7 +810,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, #elif V8_TARGET_ARCH_S390 macro_assembler.reset(new RegExpMacroAssemblerS390( isolate, zone, mode, (data->capture_count + 1) * 2)); -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 macro_assembler.reset(new RegExpMacroAssemblerPPC( isolate, zone, mode, (data->capture_count + 1) * 2)); #elif V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h index 2d6fbc585ffe64..0b7b1f60cbb162 100644 --- a/deps/v8/src/runtime/runtime-utils.h +++ b/deps/v8/src/runtime/runtime-utils.h @@ -126,7 +126,7 @@ static inline ObjectPair MakePair(Object x, Object y) { #if defined(V8_TARGET_LITTLE_ENDIAN) return x.ptr() | (static_cast(y.ptr()) << 32); #elif 
defined(V8_TARGET_BIG_ENDIAN) - return y->ptr() | (static_cast(x->ptr()) << 32); + return y.ptr() | (static_cast(x.ptr()) << 32); #else #error Unknown endianness #endif diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h index 8dce1b3f3fea30..cb638ebb30f735 100644 --- a/deps/v8/src/snapshot/deserializer.h +++ b/deps/v8/src/snapshot/deserializer.h @@ -28,7 +28,7 @@ class Object; // of objects found in code. #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ - V8_EMBEDDED_CONSTANT_POOL + defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL #define V8_CODE_EMBEDS_OBJECT_POINTER 1 #else #define V8_CODE_EMBEDS_OBJECT_POINTER 0 diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h index 766ce71db11b62..22c8d0acc1f0db 100644 --- a/deps/v8/src/wasm/baseline/liftoff-assembler.h +++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h @@ -817,7 +817,7 @@ class LiftoffStackSlots { #include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/wasm/baseline/arm/liftoff-assembler-arm.h" -#elif V8_TARGET_ARCH_PPC +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/wasm/baseline/mips/liftoff-assembler-mips.h" diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc index 7c41c0a209cd75..9af088aa25bb50 100644 --- a/deps/v8/src/wasm/jump-table-assembler.cc +++ b/deps/v8/src/wasm/jump-table-assembler.cc @@ -184,7 +184,7 @@ void JumpTableAssembler::NopBytes(int bytes) { } } -#elif V8_TARGET_ARCH_PPC64 +#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, Address lazy_compile_target) { int start = pc_offset(); diff --git a/deps/v8/src/wasm/jump-table-assembler.h 
b/deps/v8/src/wasm/jump-table-assembler.h index 8889c18e9c5192..9fbab46e7e709b 100644 --- a/deps/v8/src/wasm/jump-table-assembler.h +++ b/deps/v8/src/wasm/jump-table-assembler.h @@ -184,6 +184,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { static constexpr int kJumpTableSlotSize = 7 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize; static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize; +#elif V8_TARGET_ARCH_PPC + static constexpr int kJumpTableLineSize = 24; + static constexpr int kJumpTableSlotSize = 16; + static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize; + static constexpr int kJumpTableStubSlotSize = 16; #elif V8_TARGET_ARCH_MIPS static constexpr int kJumpTableLineSize = 6 * kInstrSize; static constexpr int kJumpTableSlotSize = 4 * kInstrSize; diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc index 81460b9fe29912..42306e08384242 100644 --- a/deps/v8/src/wasm/wasm-serialization.cc +++ b/deps/v8/src/wasm/wasm-serialization.cc @@ -367,7 +367,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) { writer->WriteVector(code->source_positions()); writer->WriteVector(Vector::cast(code->protected_instructions())); #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \ - V8_TARGET_ARCH_PPC + V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 // On platforms that don't support misaligned word stores, copy to an aligned // buffer if necessary so we can relocate the serialized code. std::unique_ptr aligned_buffer;