[vm/compiler] Remove speculative shift instructions
SpeculativeShiftInt64Op and SpeculativeShiftUint32Op instructions are
only used in JIT mode. The only advantage of these instructions over
non-speculative shifts is that they can be hoisted out of loops
in more cases. However, speculative instructions can suffer from deopt
storms, introduce unnecessary differences in IL and generated code
between JIT and AOT, and add a significant amount of extra code to maintain.

TEST=ci

Change-Id: I689e44b54832e2af76cbc99b9fc4bc937b9f2c87
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/394566
Commit-Queue: Alexander Markov <[email protected]>
Reviewed-by: Slava Egorov <[email protected]>
alexmarkov authored and Commit Queue committed Nov 14, 2024
1 parent 843c47b commit a22daf8
Showing 10 changed files with 26 additions and 626 deletions.
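
To make the shape of the change easier to see before the diffs, here is a small, self-contained C++ sketch (illustration only: Mode, ShiftInstr, ChooseShiftInstrBefore and the simplified ShiftInt64Op below are invented for this example and are not Dart VM code). Before this commit, BinaryIntegerOpInstr::Make picked a speculative shift in JIT mode and a non-speculative shift in AOT mode; after it, both modes build the non-speculative ShiftInt64Op/ShiftUint32Op, and the JIT-only deoptimization path is reported through ComputeCanDeoptimizeAfterCall() instead.

// Illustration only -- not the Dart VM sources. The names below are
// invented to mirror the selection logic that this commit simplifies.
#include <cstdio>

enum class Mode { kJit, kAot };
enum class ShiftInstr { kSpeculativeShiftInt64Op, kShiftInt64Op };

// Before this commit: JIT compilation used the speculative shift,
// AOT compilation used the non-speculative one.
ShiftInstr ChooseShiftInstrBefore(Mode mode) {
  return mode == Mode::kAot ? ShiftInstr::kShiftInt64Op
                            : ShiftInstr::kSpeculativeShiftInt64Op;
}

// After this commit: both modes build the non-speculative instruction,
// and only the JIT reports a deopt-after-call path.
struct ShiftInt64Op {
  Mode mode;
  bool CanDeoptimize() const { return false; }
  bool CanDeoptimizeAfterCall() const { return mode != Mode::kAot; }
};

int main() {
  const Mode modes[] = {Mode::kJit, Mode::kAot};
  for (Mode mode : modes) {
    const ShiftInt64Op op{mode};
    std::printf("%s: before=%s, after=ShiftInt64Op (deopt_after_call=%d)\n",
                mode == Mode::kJit ? "JIT" : "AOT",
                ChooseShiftInstrBefore(mode) == ShiftInstr::kShiftInt64Op
                    ? "ShiftInt64Op"
                    : "SpeculativeShiftInt64Op",
                op.CanDeoptimizeAfterCall() ? 1 : 0);
  }
  return 0;
}
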
10 changes: 0 additions & 10 deletions runtime/vm/compiler/backend/constant_propagator.cc
@@ -1197,20 +1197,10 @@ void ConstantPropagator::VisitShiftInt64Op(ShiftInt64OpInstr* instr) {
VisitBinaryIntegerOp(instr);
}

void ConstantPropagator::VisitSpeculativeShiftInt64Op(
SpeculativeShiftInt64OpInstr* instr) {
VisitBinaryIntegerOp(instr);
}

void ConstantPropagator::VisitShiftUint32Op(ShiftUint32OpInstr* instr) {
VisitBinaryIntegerOp(instr);
}

void ConstantPropagator::VisitSpeculativeShiftUint32Op(
SpeculativeShiftUint32OpInstr* instr) {
VisitBinaryIntegerOp(instr);
}

void ConstantPropagator::VisitBoxInt64(BoxInt64Instr* instr) {
VisitBox(instr);
}
21 changes: 4 additions & 17 deletions runtime/vm/compiler/backend/il.cc
@@ -2285,27 +2285,16 @@ BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make(Representation representation,
case kUnboxedUint32:
if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
(op_kind == Token::kUSHR)) {
if (CompilerState::Current().is_aot()) {
op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id,
right_range);
} else {
op = new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id,
right_range);
}
op =
new ShiftUint32OpInstr(op_kind, left, right, deopt_id, right_range);
} else {
op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id);
}
break;
case kUnboxedInt64:
if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
(op_kind == Token::kUSHR)) {
if (CompilerState::Current().is_aot()) {
op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id,
right_range);
} else {
op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id,
right_range);
}
op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id, right_range);
} else {
op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id);
}
@@ -2430,10 +2419,8 @@ Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) {
return right()->definition();
} else if ((rhs > 0) && Utils::IsPowerOfTwo(rhs)) {
const int64_t shift_amount = Utils::ShiftForPowerOfTwo(rhs);
const Representation shift_amount_rep =
CompilerState::Current().is_aot() ? kUnboxedInt64 : kTagged;
ConstantInstr* constant_shift_amount = flow_graph->GetConstant(
Smi::Handle(Smi::New(shift_amount)), shift_amount_rep);
Smi::Handle(Smi::New(shift_amount)), kUnboxedInt64);
BinaryIntegerOpInstr* shift = BinaryIntegerOpInstr::Make(
representation(), Token::kSHL, left()->CopyWithType(),
new Value(constant_shift_amount), GetDeoptId(), can_overflow(),
73 changes: 7 additions & 66 deletions runtime/vm/compiler/backend/il.h
@@ -503,7 +503,6 @@ struct InstrAttrs {
M(CaseInsensitiveCompare, kNoGC) \
M(BinaryInt64Op, kNoGC) \
M(ShiftInt64Op, kNoGC) \
M(SpeculativeShiftInt64Op, kNoGC) \
M(UnaryInt64Op, kNoGC) \
M(CheckArrayBound, kNoGC) \
M(GenericCheckBound, kNoGC) \
@@ -529,7 +528,6 @@ struct InstrAttrs {
M(BoxLanes, _) \
M(BinaryUint32Op, kNoGC) \
M(ShiftUint32Op, kNoGC) \
M(SpeculativeShiftUint32Op, kNoGC) \
M(UnaryUint32Op, kNoGC) \
M(BoxUint32, _) \
M(UnboxUint32, kNoGC) \
@@ -8739,8 +8737,7 @@ class UnboxInt64Instr : public UnboxIntegerInstr {

bool Definition::IsInt64Definition() {
return (Type()->ToCid() == kMintCid) || IsBinaryInt64Op() ||
IsUnaryInt64Op() || IsShiftInt64Op() || IsSpeculativeShiftInt64Op() ||
IsBoxInt64() || IsUnboxInt64();
IsUnaryInt64Op() || IsShiftInt64Op() || IsBoxInt64() || IsUnboxInt64();
}

// Calls into the runtime and performs a case-insensitive comparison of the
@@ -9495,6 +9492,9 @@ class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
: ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool ComputeCanDeoptimizeAfterCall() const {
return !CompilerState::Current().is_aot();
}
virtual bool MayThrow() const { return !IsShiftCountInRange(); }

virtual Representation representation() const { return kUnboxedInt64; }
@@ -9512,37 +9512,6 @@ class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
};

// Speculative int64 shift. Takes unboxed int64 and smi.
// Deoptimizes if right operand is negative or greater than kShiftCountLimit.
class SpeculativeShiftInt64OpInstr : public ShiftIntegerOpInstr {
public:
SpeculativeShiftInt64OpInstr(Token::Kind op_kind,
Value* left,
Value* right,
intptr_t deopt_id,
Range* right_range = nullptr)
: ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

virtual bool ComputeCanDeoptimize() const {
ASSERT(!can_overflow());
return !IsShiftCountInRange();
}

virtual Representation representation() const { return kUnboxedInt64; }

virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT((idx == 0) || (idx == 1));
return (idx == 0) ? kUnboxedInt64 : kTagged;
}

DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)

DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftInt64OpInstr, ShiftIntegerOpInstr)

private:
DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
};

// Non-speculative uint32 shift. Takes unboxed uint32 and unboxed int64.
// Throws if right operand is negative.
class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
@@ -9555,6 +9524,9 @@ class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
: ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool ComputeCanDeoptimizeAfterCall() const {
return !CompilerState::Current().is_aot();
}
virtual bool MayThrow() const {
return !IsShiftCountInRange(kUint32ShiftCountLimit);
}
@@ -9576,37 +9548,6 @@ class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
};

// Speculative uint32 shift. Takes unboxed uint32 and smi.
// Deoptimizes if right operand is negative.
class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
public:
SpeculativeShiftUint32OpInstr(Token::Kind op_kind,
Value* left,
Value* right,
intptr_t deopt_id,
Range* right_range = nullptr)
: ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

virtual bool ComputeCanDeoptimize() const { return !IsShiftCountInRange(); }

virtual Representation representation() const { return kUnboxedUint32; }

virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT((idx == 0) || (idx == 1));
return (idx == 0) ? kUnboxedUint32 : kTagged;
}

DECLARE_INSTRUCTION(SpeculativeShiftUint32Op)

DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftUint32OpInstr,
ShiftIntegerOpInstr)

private:
static constexpr intptr_t kUint32ShiftCountLimit = 31;

DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
};

class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
UnaryDoubleOpInstr(Token::Kind op_kind,
98 changes: 0 additions & 98 deletions runtime/vm/compiler/backend/il_arm.cc
@@ -6667,53 +6667,6 @@ void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}

LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
summary->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
return summary;
}

void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
PairLocation* left_pair = locs()->in(0).AsPairLocation();
Register left_lo = left_pair->At(0).reg();
Register left_hi = left_pair->At(1).reg();
PairLocation* out_pair = locs()->out(0).AsPairLocation();
Register out_lo = out_pair->At(0).reg();
Register out_hi = out_pair->At(1).reg();
ASSERT(!can_overflow());

if (locs()->in(1).IsConstant()) {
EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
left_hi, locs()->in(1).constant());
} else {
// Code for a variable shift amount.
Register shift = locs()->in(1).reg();
__ SmiUntag(shift);

// Deopt if shift is larger than 63 or less than 0 (or not a smi).
if (!IsShiftCountInRange()) {
ASSERT(CanDeoptimize());
compiler::Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);

__ CompareImmediate(shift, kShiftCountLimit);
__ b(deopt, HI);
}

EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
left_hi, shift);
}
}

class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
public:
explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
@@ -6799,57 +6752,6 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}

LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_in(1, LocationRegisterOrSmiConstant(right()));
summary->set_temp(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}

void SpeculativeShiftUint32OpInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
Register left = locs()->in(0).reg();
Register out = locs()->out(0).reg();
Register temp = locs()->temp(0).reg();
ASSERT(left != out);

if (locs()->in(1).IsConstant()) {
EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
locs()->in(1).constant());
} else {
Register right = locs()->in(1).reg();
const bool shift_count_in_range =
IsShiftCountInRange(kUint32ShiftCountLimit);

__ SmiUntag(temp, right);
right = temp;

// Deopt if shift count is negative.
if (!shift_count_in_range) {
ASSERT(CanDeoptimize());
compiler::Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);

__ CompareImmediate(right, 0);
__ b(deopt, LT);
}

EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);

if (!shift_count_in_range) {
__ CompareImmediate(right, kUint32ShiftCountLimit);
__ LoadImmediate(out, 0, HI);
}
}
}

LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;