Use shape parameter instead of newshape
- `newshape` was deprecated in NumPy 2.1.
rwnobrega committed Oct 31, 2024
1 parent fde974b commit aa0bc1d
Showing 9 changed files with 37 additions and 18 deletions.
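
For context, and not part of the commit itself: a minimal sketch of the `np.reshape` API change, using an illustrative array `r` and target shape `(-1, 3)`.

import numpy as np

r = np.arange(12)

# Old spelling, deprecated since NumPy 2.1:
# z = np.reshape(r, newshape=(-1, 3))

# New spelling, adopted throughout this commit:
z = np.reshape(r, shape=(-1, 3))

# Positional form, accepted by old and new NumPy releases alike:
z = np.reshape(r, (-1, 3))

The `shape` keyword appears to have been introduced together with the deprecation of `newshape`, so the keyword form assumes NumPy 2.1 or later; the positional form is the version-agnostic alternative.
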
2 changes: 1 addition & 1 deletion src/komm/_error_control_block/decoders/bcjr.py
@@ -23,7 +23,7 @@ def decode_bcjr(code: TerminatedConvolutionalCode, r: npt.ArrayLike, *, snr: flo
initial_state_distribution = np.eye(1, fsm.num_states, 0)
final_state_distribution = np.eye(1, fsm.num_states, 0)

-    z = np.reshape(r, newshape=(-1, n0))
+    z = np.reshape(r, shape=(-1, n0))
input_posteriors = fsm.forward_backward(
z,
metric_function=metric_function,
2 changes: 1 addition & 1 deletion src/komm/_error_control_block/decoders/viterbi.py
@@ -20,7 +20,7 @@ def decode_viterbi(code: TerminatedConvolutionalCode, r: npt.ArrayLike, metric_f
initial_metrics = np.full(fsm.num_states, fill_value=np.inf)
initial_metrics[0] = 0.0

-    z = np.reshape(r, newshape=(-1, n0))
+    z = np.reshape(r, shape=(-1, n0))
xs_hat, final_metrics = fsm.viterbi(z, metric_function=metric_function, initial_metrics=initial_metrics)

if code.mode == "direct-truncation":
2 changes: 1 addition & 1 deletion (file path not shown)
Expand Up @@ -75,7 +75,7 @@ def __call__(self, in0: npt.ArrayLike) -> np.ndarray:
self.convolutional_code.finite_state_machine,
)
input_sequence_hat = fsm.viterbi_streaming(
-            observed_sequence=np.reshape(in0, newshape=(-1, n)),
+            observed_sequence=np.reshape(in0, shape=(-1, n)),
metric_function=self.metric_function,
memory=self.memory,
)
39 changes: 29 additions & 10 deletions src/komm/_modulation/Modulation.py
@@ -91,16 +91,24 @@ def __init__(self, constellation, labeling):

self._labeling = np.array(labeling, dtype=int)
if self._labeling.shape != (self._order, self._bits_per_symbol):
-            raise ValueError("The shape of `labeling` must be ({}, {})".format(self._order, self._bits_per_symbol))
+            raise ValueError(
+                "The shape of `labeling` must be ({}, {})".format(
+                    self._order, self._bits_per_symbol
+                )
+            )
if np.any(self._labeling < 0) or np.any(self._labeling > 1):
raise ValueError("The elements of `labeling` must be either 0 or 1")
if len(set(tuple(row) for row in self._labeling)) != self._order:
raise ValueError("The rows of `labeling` must be distinct")

-        self._inverse_labeling = dict(zip(map(tuple, self._labeling), range(self._order)))
+        self._inverse_labeling = dict(
+            zip(map(tuple, self._labeling), range(self._order))
+        )

def __repr__(self):
-        args = "constellation={}, labeling={}".format(self._constellation.tolist(), self._labeling.tolist())
+        args = "constellation={}, labeling={}".format(
+            self._constellation.tolist(), self._labeling.tolist()
+        )
return "{}({})".format(self.__class__.__name__, args)

@property
@@ -177,7 +185,10 @@ def energy_per_symbol(self):
>>> mod.energy_per_symbol
np.float64(0.75)
"""
-        return np.real(np.dot(self._constellation, self._constellation.conj())) / self._order
+        return (
+            np.real(np.dot(self._constellation, self._constellation.conj()))
+            / self._order
+        )

@property
def energy_per_bit(self):
@@ -262,18 +273,24 @@ def modulate(self, bits):
m = self._bits_per_symbol
n_symbols = len(bits) // m
if len(bits) != n_symbols * m:
-            raise ValueError("The length of `bits` must be a multiple of the number of bits per symbol.")
+            raise ValueError(
+                "The length of `bits` must be a multiple of the number of bits per symbol."
+            )
symbols = np.empty(n_symbols, dtype=self._constellation.dtype)
-        for i, bit_sequence in enumerate(np.reshape(bits, newshape=(n_symbols, m))):
-            symbols[i] = self._constellation[self._inverse_labeling[tuple(bit_sequence)]]
+        for i, bit_sequence in enumerate(np.reshape(bits, shape=(n_symbols, m))):
+            symbols[i] = self._constellation[
+                self._inverse_labeling[tuple(bit_sequence)]
+            ]
return symbols

def _demodulate_hard(self, received):
# General minimum Euclidean distance hard demodulator.
hard_bits = np.empty((len(received), self._bits_per_symbol), dtype=int)
for i, y in enumerate(received):
-            hard_bits[i, :] = self._labeling[np.argmin(np.abs(self._constellation - y)), :]
-        return np.reshape(hard_bits, newshape=-1)
+            hard_bits[i, :] = self._labeling[
+                np.argmin(np.abs(self._constellation - y)), :
+            ]
+        return np.reshape(hard_bits, shape=-1)

def _demodulate_soft(self, received, channel_snr=1.0):
# Computes the L-values (LLR) of each bit.
@@ -332,5 +349,7 @@ def demodulate(self, received, decision_method="hard", **kwargs):
if decision_method in ["hard", "soft"]:
demodulate = getattr(self, "_demodulate_" + decision_method)
else:
-            raise ValueError("Parameter `decision_method` should be either 'hard' or 'soft'")
+            raise ValueError(
+                "Parameter `decision_method` should be either 'hard' or 'soft'"
+            )
return demodulate(np.asarray(received), **kwargs)
2 changes: 1 addition & 1 deletion src/komm/_modulation/PAModulation.py
@@ -76,7 +76,7 @@ def _demodulate_hard(self, received):
0,
self._order - 1,
).astype(int)
-        return np.reshape(self._labeling[indices], newshape=-1)
+        return np.reshape(self._labeling[indices], shape=-1)

@staticmethod
def _demodulate_soft_pam2(y, gamma):
2 changes: 1 addition & 1 deletion src/komm/_source_coding/FixedToVariableEncoder.py
@@ -33,5 +33,5 @@ class FixedToVariableEncoder:

def __call__(self, in0: npt.ArrayLike) -> np.ndarray:
k, enc = self.code.source_block_size, self.code.enc_mapping
-        out0 = np.concatenate([enc[tuple(s)] for s in np.reshape(in0, newshape=(-1, k))])
+        out0 = np.concatenate([enc[tuple(s)] for s in np.reshape(in0, shape=(-1, k))])
return out0
2 changes: 1 addition & 1 deletion src/komm/_source_coding/VariableToFixedDecoder.py
@@ -33,5 +33,5 @@ class VariableToFixedDecoder:

def __call__(self, in0: npt.ArrayLike) -> np.ndarray:
n, dec = self.code.target_block_size, self.code.dec_mapping
-        out0 = np.concatenate([dec[tuple(s)] for s in np.reshape(in0, newshape=(-1, n))])
+        out0 = np.concatenate([dec[tuple(s)] for s in np.reshape(in0, shape=(-1, n))])
return out0
2 changes: 1 addition & 1 deletion src/komm/_util/bit_operations.py
@@ -60,7 +60,7 @@ def int2binlist(integer, width=None):


def _pack(list_, width):
-    return np.apply_along_axis(_binlist2int, 1, np.reshape(list_, newshape=(-1, width)))
+    return np.apply_along_axis(_binlist2int, 1, np.reshape(list_, shape=(-1, width)))


def pack(list_, width):
2 changes: 1 addition & 1 deletion tests/test_error_control_convolutional.py
@@ -80,7 +80,7 @@ def test_convolutional_space_state_representation_2(feedforward_polynomials, fee

s = np.zeros(nu, dtype=int)

-    for t, u in enumerate(np.reshape(input_bits, newshape=(-1, k))):
+    for t, u in enumerate(np.reshape(input_bits, shape=(-1, k))):
s, v = (np.dot(s, A) + np.dot(u, B)) % 2, (np.dot(s, C) + np.dot(u, D)) % 2
output_bits[t * n : (t + 1) * n] = v
