Clipping deformer & cosmetic fixes #80

Merged
merged 3 commits on Jul 20, 2020

4 changes: 2 additions & 2 deletions docs/examples.rst
@@ -21,12 +21,12 @@ contain (deformed) audio and store the deformation history objects.
>>> # Ready to go!

>>> # Loading audio from disk with an existing jams
- >>> j_orig = jams.load('existing_jams_file.jams')
+ >>> existing_jams = jams.load('existing_jams_file.jams')
>>> j_orig = muda.load_jam_audio(existing_jams, 'orig.ogg')
>>> # Ready to go!

>>> # Loading in-memory audio (y, sr) with an existing jams
- >>> j_orig = jams.load('existing_jams_file.jams')
+ >>> existing_jams = jams.load('existing_jams_file.jams')
>>> j_orig = muda.jam_pack(existing_jams, _audio=dict(y=y, sr=sr))
>>> # Ready to go!

2 changes: 1 addition & 1 deletion docs/index.rst
@@ -19,7 +19,7 @@ perturbations to annotated music data for the purpose of fitting statistical mod

Introduction
------------
- .. note:: Before reading ahead, it is recommended to familiarize yourself with the `JAMS documentation <http://pythonhosted.org/jams/>`_.
+ .. note:: Before reading ahead, it is recommended to familiarize yourself with the `JAMS documentation <https://jams.readthedocs.io/en/stable/>`_.

The design of `muda` is patterned loosely after the `Transformer` abstraction in `scikit-learn <http://scikit-learn.org/stable/>`_.
At a high level, each input example consists of an audio clip (with sampling rate) as a `numpy.ndarray` and its annotations stored
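A minimal sketch of that pattern, assuming the loading API from the examples above and the `Clipping` deformer added in this PR (filenames are placeholders):

>>> existing_jams = jams.load('existing_jams_file.jams')
>>> j_orig = muda.load_jam_audio(existing_jams, 'orig.ogg')
>>> D = muda.deformers.Clipping(clip_limit=0.75)
>>> out_jams = list(D.transform(j_orig))
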
1 change: 1 addition & 0 deletions muda/deformers/__init__.py
@@ -10,3 +10,4 @@
from .util import *
from .colorednoise import *
from .ir import *
from .clipping import *
178 changes: 178 additions & 0 deletions muda/deformers/clipping.py
@@ -0,0 +1,178 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2020-06-17 by Jatin Khilnani <[email protected]>
"""Clipping (waveform/loudness distortion) transformations"""

import numpy as np

from ..base import BaseTransformer, _get_rng

__all__ = ["Clipping", "LinearClipping", "RandomClipping"]


class AbstractClipping(BaseTransformer):
"""Abstract base class for clipping

This contains the deformation function
but does not manage state or parameters.
"""

def __init__(self):
BaseTransformer.__init__(self)

@staticmethod
def audio(mudabox, state):
        # Deform the audio: clip the waveform to a fraction (clip_limit)
        # of its original minimum and maximum amplitudes
        mudabox._audio["y"] = np.clip(
            mudabox._audio["y"],
            mudabox._audio["y"].min() * state["clip_limit"],
            mudabox._audio["y"].max() * state["clip_limit"],
        )


class Clipping(AbstractClipping):
"""Static clipping beyond a fixed limit

This transformation affects the following attributes:
- Audio


Attributes
----------
clip_limit : float or list of floats, strictly (0,1)
The amplitude fraction beyond which the waveform is clipped.

Examples
--------
>>> D = muda.deformers.Clipping(clip_limit=0.75)
>>> out_jams = list(D.transform(jam_in))

See Also
--------
LinearClipping
RandomClipping
"""

def __init__(self, clip_limit=0.8):
"""Clipping"""
AbstractClipping.__init__(self)

self.clip_limit = np.atleast_1d(clip_limit).flatten()
if np.any(self.clip_limit <= 0.0) or np.any(self.clip_limit >= 1.0):
raise ValueError("clip_limit parameter domain is strictly (0,1).")
self.clip_limit = self.clip_limit.tolist()

def states(self, jam):
for clip_limit in self.clip_limit:
yield dict(clip_limit=clip_limit)


class LinearClipping(AbstractClipping):
"""Linearly spaced clipping.

`n_samples` are generated with clipping spaced linearly
between `lower` and `upper`.

This transformation affects the following attributes:
- Audio

Attributes
----------
n_samples : int > 0
Number of deformations to generate

lower : float > 0.0
upper : float in (lower, 1.0)
Minimum and maximum bounds on the clip parameters

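    Examples
    --------
    >>> # An illustrative sketch using the default parameters:
    >>> # three clip limits spaced linearly between 0.4 and 0.8
    >>> D = muda.deformers.LinearClipping(n_samples=3, lower=0.4, upper=0.8)
    >>> out_jams = list(D.transform(jam_in))
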
See Also
--------
Clipping
RandomClipping
"""

def __init__(self, n_samples=3, lower=0.4, upper=0.8):
AbstractClipping.__init__(self)

if n_samples <= 0:
raise ValueError("n_samples must be strictly positive.")

if lower <= 0.0 or lower >= 1.0:
raise ValueError("lower parameter domain is strictly (0,1).")

if upper <= lower:
raise ValueError("upper must be strictly larger than lower.")

if upper >= 1.0:
raise ValueError("upper parameter domain is strictly (0,1).")

self.n_samples = n_samples
self.lower = float(lower)
self.upper = float(upper)

def states(self, jam):
clip_limits = np.linspace(
self.lower, self.upper, num=self.n_samples, endpoint=True
)

for clip_limit in clip_limits:
yield dict(clip_limit=clip_limit)


class RandomClipping(AbstractClipping):
"""Random clipping

For each deformation, the clip_limit parameter is drawn from a
Beta distribution with parameters `(a, b)`

This transformation affects the following attributes:
- Audio

Attributes
----------
n_samples : int > 0
The number of samples to generate per input

a : float > 0.0
b : float > 0.0
Parameters of the Beta distribution from which
        the clip_limit parameter is sampled.

rng : None, int, or np.random.RandomState
The random number generator state.

If `None`, then `np.random` is used.

If `int`, then `rng` becomes the seed for the random state.

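    Examples
    --------
    >>> # An illustrative sketch: with a=1, b=1 the Beta distribution is
    >>> # uniform on (0, 1); rng=0 seeds the sampler for reproducibility
    >>> D = muda.deformers.RandomClipping(n_samples=3, a=1.0, b=1.0, rng=0)
    >>> out_jams = list(D.transform(jam_in))
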
See Also
--------
Clipping
LinearClipping
"""

def __init__(self, n_samples=3, a=1.0, b=1.0, rng=None):

AbstractClipping.__init__(self)

if n_samples <= 0:
raise ValueError("n_samples must be strictly positive.")

if a <= 0.0:
raise ValueError("a(alpha) parameter must be strictly positive.")

if b <= 0.0:
raise ValueError("b(beta) parameter must be strictly positive.")

self.n_samples = n_samples
self.a = a
self.b = b
self.rng = rng
self._rng = _get_rng(rng)

def states(self, jam):
clip_limits = self._rng.beta(
a=self.a, b=self.b, size=self.n_samples
)

for clip_limit in clip_limits:
yield dict(clip_limit=clip_limit)
2 changes: 1 addition & 1 deletion setup.py
@@ -42,6 +42,6 @@
],
extras_require={
'docs': ['numpydoc'],
- 'tests': ['pytest < 4', 'pytest-cov'],
+ 'tests': ['pytest < 4', 'pytest-cov==2.9.0'],
}
)