Merge pull request #328 from recmo/prestwich/shift
feature: more Shl+Shr impls
prestwich authored Oct 13, 2023
2 parents 0fdcd4d + 17f7300 commit 1c84b2d
Showing 2 changed files with 111 additions and 75 deletions.
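In short: the hand-written usize-only Shl/Shr/ShlAssign/ShrAssign impls in src/bits.rs are replaced by Uint-by-Uint shift impls plus an impl_shift! macro covering the common primitive integer types (including their reference and assign forms), and the now-redundant u32 impls are dropped from src/support/num_traits.rs. A minimal usage sketch of the resulting operators, assuming ruint's Uint::from_limbs constructor; this is illustrative only and not part of the diff:

use ruint::Uint;

fn demo() {
    let x = Uint::<256, 4>::from_limbs([0x1234, 0, 0, 0]);
    let n = Uint::<256, 4>::from_limbs([8, 0, 0, 0]);

    // Shift by another Uint: only the lowest limb of the right-hand side is used.
    assert_eq!(x << n, x << 8usize);

    // References and the smaller primitive types are covered by the macro.
    assert_eq!(x >> &n, x >> 8u32);
    assert_eq!(x << 8u8, x << 8i16);

    // Assign forms.
    let mut y = x;
    y <<= n;
    assert_eq!(y, x << 8usize);
}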
166 changes: 111 additions & 55 deletions src/bits.rs
@@ -548,106 +548,162 @@ impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign);
 impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
 impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);

-impl<const BITS: usize, const LIMBS: usize> ShlAssign<usize> for Uint<BITS, LIMBS> {
-    #[inline(always)]
-    fn shl_assign(&mut self, rhs: usize) {
-        *self = self.wrapping_shl(rhs);
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> ShlAssign<&usize> for Uint<BITS, LIMBS> {
-    #[inline(always)]
-    fn shl_assign(&mut self, rhs: &usize) {
-        *self = self.wrapping_shl(*rhs);
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shl<usize> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shl(self, rhs: usize) -> Self {
-        self.wrapping_shl(rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shl<usize> for &Uint<BITS, LIMBS> {
-    type Output = Uint<BITS, LIMBS>;
-
-    #[inline(always)]
-    fn shl(self, rhs: usize) -> Self::Output {
-        self.wrapping_shl(rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shl<&usize> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shl(self, rhs: &usize) -> Self {
-        self.wrapping_shl(*rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shl<&usize> for &Uint<BITS, LIMBS> {
-    type Output = Uint<BITS, LIMBS>;
-
-    #[inline(always)]
-    fn shl(self, rhs: &usize) -> Self::Output {
-        self.wrapping_shl(*rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> ShrAssign<usize> for Uint<BITS, LIMBS> {
-    #[inline(always)]
-    fn shr_assign(&mut self, rhs: usize) {
-        *self = self.wrapping_shr(rhs);
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> ShrAssign<&usize> for Uint<BITS, LIMBS> {
-    #[inline(always)]
-    fn shr_assign(&mut self, rhs: &usize) {
-        *self = self.wrapping_shr(*rhs);
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shr<usize> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shr(self, rhs: usize) -> Self {
-        self.wrapping_shr(rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shr<usize> for &Uint<BITS, LIMBS> {
-    type Output = Uint<BITS, LIMBS>;
-
-    #[inline(always)]
-    fn shr(self, rhs: usize) -> Self::Output {
-        self.wrapping_shr(rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shr<&usize> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shr(self, rhs: &usize) -> Self {
-        self.wrapping_shr(*rhs)
-    }
-}
-
-impl<const BITS: usize, const LIMBS: usize> Shr<&usize> for &Uint<BITS, LIMBS> {
-    type Output = Uint<BITS, LIMBS>;
-
-    #[inline(always)]
-    fn shr(self, rhs: &usize) -> Self::Output {
-        self.wrapping_shr(*rhs)
-    }
-}
+impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
+    type Output = Self;
+
+    #[inline(always)]
+    fn shl(self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
+        // This check shortcuts, and prevents panics on the `[0]` later
+        if BITS == 0 {
+            return self;
+        }
+
+        // Rationale: if BITS is larger than 2**64 - 1, it means we're running
+        // on a 128-bit platform with 2.3 exabytes of memory. In this case,
+        // the code produces incorrect output.
+        self.wrapping_shl(rhs.as_limbs()[0] as usize)
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
+    type Output = Self;
+
+    #[inline(always)]
+    fn shl(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
+        self << *rhs
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
+    type Output = Self;
+
+    #[inline(always)]
+    fn shr(self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
+        // This check shortcuts, and prevents panics on the `[0]` later
+        if BITS == 0 {
+            return self;
+        }
+
+        // Rationale: if BITS is larger than 2**64 - 1, it means we're running
+        // on a 128-bit platform with 2.3 exabytes of memory. In this case,
+        // the code produces incorrect output.
+        self.wrapping_shr(rhs.as_limbs()[0] as usize)
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
+    type Output = Self;
+
+    #[inline(always)]
+    fn shr(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
+        self >> *rhs
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
+    #[inline(always)]
+    fn shl_assign(&mut self, rhs: Self) {
+        *self = *self << rhs;
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
+    #[inline(always)]
+    fn shl_assign(&mut self, rhs: &Self) {
+        *self = *self << rhs;
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
+    #[inline(always)]
+    fn shr_assign(&mut self, rhs: Self) {
+        *self = *self >> rhs;
+    }
+}
+
+impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
+    #[inline(always)]
+    fn shr_assign(&mut self, rhs: &Self) {
+        *self = *self >> rhs;
+    }
+}
+
+macro_rules! impl_shift {
+    (@main $u:ty) => {
+        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
+            type Output = Self;
+
+            #[inline(always)]
+            fn shl(self, rhs: $u) -> Self::Output {
+                self.wrapping_shl(rhs as usize)
+            }
+        }
+
+        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
+            type Output = Self;
+
+            #[inline(always)]
+            fn shr(self, rhs: $u) -> Self::Output {
+                self.wrapping_shr(rhs as usize)
+            }
+        }
+    };
+
+    (@ref $u:ty) => {
+        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
+            type Output = Self;
+
+            #[inline(always)]
+            fn shl(self, rhs: &$u) -> Self::Output {
+                <Self>::shl(self, *rhs)
+            }
+        }
+
+        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
+            type Output = Self;
+
+            #[inline(always)]
+            fn shr(self, rhs: &$u) -> Self::Output {
+                <Self>::shr(self, *rhs)
+            }
+        }
+    };
+
+    (@assign $u:ty) => {
+        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
+            #[allow(clippy::inline_always)]
+            #[inline(always)]
+            fn shl_assign(&mut self, rhs: $u) {
+                *self = *self << rhs;
+            }
+        }
+
+        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
+            #[allow(clippy::inline_always)]
+            #[inline(always)]
+            fn shr_assign(&mut self, rhs: $u) {
+                *self = *self >> rhs;
+            }
+        }
+    };
+
+    ($u:ty) => {
+        impl_shift!(@main $u);
+        impl_shift!(@ref $u);
+        impl_shift!(@assign $u);
+        impl_shift!(@assign &$u);
+    };
+
+    ($u:ty, $($tail:ty),*) => {
+        impl_shift!($u);
+        impl_shift!($($tail),*);
+    };
+}
+
+impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);
+
+// Only when losslessly castable to usize.
+#[cfg(target_pointer_width = "64")]
+impl_shift!(u64, i64);

 #[cfg(test)]
 mod tests {
     use super::*;
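For orientation, roughly what the impl_shift!(u8) instance of the macro above expands to. The Shr halves mirror the Shl halves and are elided; this expansion is an editorial sketch, not part of the commit:

// @main arm: shift by a u8 value, cast to usize and delegated to wrapping_shl.
impl<const BITS: usize, const LIMBS: usize> Shl<u8> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shl(self, rhs: u8) -> Self::Output {
        self.wrapping_shl(rhs as usize)
    }
}

// @ref arm: shift by &u8, delegating to the by-value impl.
impl<const BITS: usize, const LIMBS: usize> Shl<&u8> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shl(self, rhs: &u8) -> Self::Output {
        <Self>::shl(self, *rhs)
    }
}

// @assign arm, instantiated for both u8 and &u8, so `x <<= 3u8` and `x <<= &3u8` work.
impl<const BITS: usize, const LIMBS: usize> ShlAssign<u8> for Uint<BITS, LIMBS> {
    #[allow(clippy::inline_always)]
    #[inline(always)]
    fn shl_assign(&mut self, rhs: u8) {
        *self = *self << rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShlAssign<&u8> for Uint<BITS, LIMBS> {
    #[allow(clippy::inline_always)]
    #[inline(always)]
    fn shl_assign(&mut self, rhs: &u8) {
        *self = *self << rhs;
    }
}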
20 changes: 0 additions & 20 deletions src/support/num_traits.rs
@@ -125,26 +125,6 @@ impl<const BITS: usize, const LIMBS: usize> CheckedRem for Uint<BITS, LIMBS> {
     }
 }

-// TODO: Move out of support.
-impl<const BITS: usize, const LIMBS: usize> Shl<u32> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shl(self, rhs: u32) -> Self::Output {
-        <Self>::shl(self, rhs as usize)
-    }
-}
-
-// TODO: Move out of support lib into.
-impl<const BITS: usize, const LIMBS: usize> Shr<u32> for Uint<BITS, LIMBS> {
-    type Output = Self;
-
-    #[inline(always)]
-    fn shr(self, rhs: u32) -> Self::Output {
-        <Self>::shr(self, rhs as usize)
-    }
-}
-
 impl<const BITS: usize, const LIMBS: usize> CheckedShl for Uint<BITS, LIMBS> {
     #[inline(always)]
     fn checked_shl(&self, other: u32) -> Option<Self> {
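The two operator impls deleted here are now generated by impl_shift! in src/bits.rs, so shifting by u32 still compiles, and the num-traits CheckedShl/CheckedShr impls below this hunk are untouched. A hedged sketch, assuming Uint::from_limbs and the num-traits CheckedShl trait; illustrative only:

use num_traits::CheckedShl;
use ruint::Uint;

fn demo() {
    let x = Uint::<64, 1>::from_limbs([1]);
    assert_eq!(x << 3u32, Uint::from_limbs([8]));      // now provided via impl_shift!(u32)
    assert_eq!(CheckedShl::checked_shl(&x, 65), None); // unchanged num-traits impl
}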
