feature: more Shl+Shr impls #328

Merged
merged 5 commits · Oct 13, 2023
Changes from 4 commits
166 changes: 111 additions & 55 deletions src/bits.rs
@@ -548,106 +548,162 @@
impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);

(The previous hand-written `Shl`, `Shr`, `ShlAssign`, and `ShrAssign` impls taking `usize` and `&usize`, including the ones on `&Uint<BITS, LIMBS>`, are removed; they are superseded by the `Uint`-by-`Uint` impls and the `impl_shift!` macro below.)

impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shl(self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
        // This check shortcuts, and prevents panics on the `[0]` later
        if BITS == 0 {
            return self;
        }
        // Rationale: if BITS is larger than 2**64 - 1, it means we're running
        // on a 128-bit platform with 2.3 exabytes of memory. In this case,
        // the code produces incorrect output.
        self.wrapping_shl(rhs.as_limbs()[0] as usize)
    }
}

impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shl(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
        self << *rhs
    }
}

impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shr(self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
        // This check shortcuts, and prevents panics on the `[0]` later
        if BITS == 0 {
            return self;
        }
        // Rationale: if BITS is larger than 2**64 - 1, it means we're running
        // on a 128-bit platform with 2.3 exabytes of memory. In this case,
        // the code produces incorrect output.
        self.wrapping_shr(rhs.as_limbs()[0] as usize)
    }
}

impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shr(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
        self >> *rhs
    }
}

impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: Self) {
        *self = *self << rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: &Self) {
        *self = *self << rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = *self >> rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: &Self) {
        *self = *self >> rhs;
    }
}

macro_rules! impl_shift {
    (@main $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: $u) -> Self::Output {
                self.wrapping_shl(rhs as usize)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: $u) -> Self::Output {
                self.wrapping_shr(rhs as usize)
            }
        }
    };

    (@ref $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: &$u) -> Self::Output {
                <Self>::shl(self, *rhs)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: &$u) -> Self::Output {
                <Self>::shr(self, *rhs)
            }
        }
    };

    (@assign $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
            #[allow(clippy::inline_always)]
            #[inline(always)]
            fn shl_assign(&mut self, rhs: $u) {
                *self = *self << rhs;
            }
        }

        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
            #[allow(clippy::inline_always)]
            #[inline(always)]
            fn shr_assign(&mut self, rhs: $u) {
                *self = *self >> rhs;
            }
        }
    };

    ($u:ty) => {
        impl_shift!(@main $u);
        impl_shift!(@ref $u);
        impl_shift!(@assign $u);
        impl_shift!(@assign &$u);
    };

    ($u:ty, $($tail:ty),*) => {
        impl_shift!($u);
        impl_shift!($($tail),*);
    };
}

impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);

// Only when losslessly castable to usize.
#[cfg(target_pointer_width = "64")]
impl_shift!(u64, i64);
Contributor comment on lines +703 to +705:

below is lossless too, it just pads
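As a hedged aside (not part of the PR), the reviewer's point about padding can be illustrated with plain Rust casts: widening a narrower integer to usize zero-extends ("pads") and is always lossless, whereas u64 to usize only round-trips on 64-bit targets, which is presumably why the u64/i64 impls are gated on target_pointer_width = "64".

    fn main() {
        // Widening casts to usize "pad" the upper bits with zeros, so they are always lossless.
        let small: u16 = 0x1234;
        assert_eq!(small as usize, 0x1234);

        // u64 -> usize only round-trips when usize is 64 bits wide, hence the
        // #[cfg(target_pointer_width = "64")] gate on the u64/i64 impls.
        let big: u64 = u64::MAX;
        #[cfg(target_pointer_width = "64")]
        assert_eq!(big as usize as u64, big);
        let _ = big;
    }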


#[cfg(test)]
mod tests {
use super::*;
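To make the ergonomics of the new impls concrete, here is a hedged usage sketch. It is not taken from the PR's tests; it assumes ruint's `aliases::U256` alias and the `Uint::from` constructor, and it relies on the documented behavior above that only the lowest limb of a `Uint` shift amount is used.

    use ruint::aliases::U256;

    fn main() {
        let x = U256::from(1u64);

        // Shift amounts can now be any of the newly supported primitive types.
        assert_eq!(x << 8u8, U256::from(256u64));
        assert_eq!(x << 8u16, x << 8u32);

        // ...or another Uint; only the lowest limb of the right-hand side is used.
        let amount = U256::from(8u64);
        assert_eq!(x << amount, U256::from(256u64));

        // The assign forms route through the same shift impls.
        let mut y = U256::from(4u64);
        y >>= 1u8;
        assert_eq!(y, U256::from(2u64));
    }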
20 changes: 0 additions & 20 deletions src/support/num_traits.rs
@@ -125,26 +125,6 @@ impl<const BITS: usize, const LIMBS: usize> CheckedRem for Uint<BITS, LIMBS> {
}
}

// TODO: Move out of support.
impl<const BITS: usize, const LIMBS: usize> Shl<u32> for Uint<BITS, LIMBS> {
type Output = Self;

#[inline(always)]
fn shl(self, rhs: u32) -> Self::Output {
<Self>::shl(self, rhs as usize)
}
}

// TODO: Move out of support lib into.
impl<const BITS: usize, const LIMBS: usize> Shr<u32> for Uint<BITS, LIMBS> {
type Output = Self;

#[inline(always)]
fn shr(self, rhs: u32) -> Self::Output {
<Self>::shr(self, rhs as usize)
}
}

impl<const BITS: usize, const LIMBS: usize> CheckedShl for Uint<BITS, LIMBS> {
#[inline(always)]
fn checked_shl(&self, other: u32) -> Option<Self> {
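For context, a hedged sketch (not part of the diff) of why these hand-written impls can be dropped: num-traits' CheckedShl has Shl<u32, Output = Self> as a supertrait bound, and that bound is now satisfied by the impl_shift! macro in src/bits.rs, so the CheckedShl impl shown above keeps compiling unchanged. The snippet assumes ruint's aliases::U64 alias and ruint being built with its num-traits support enabled.

    use num_traits::CheckedShl;
    use ruint::aliases::U64;

    fn main() {
        // Calling through the trait (UFCS) exercises the num-traits impl, whose
        // Shl<u32, Output = Self> supertrait bound is now met by impl_shift!.
        let x = U64::from(3u64);
        assert_eq!(CheckedShl::checked_shl(&x, 1), Some(U64::from(6u64)));
    }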