Skip to content

Binaural Analysis

This section provides an overview of the binaural analysis tools available in Soundscapy. It includes a brief description of each tool, as well as information on how to access and use them.

AnalysisSettings

Bases: BaseModel

Settings for audio analysis methods.

PARAMETER DESCRIPTION
version

Version of the configuration.

TYPE: str

PythonAcoustics

Settings for PythonAcoustics metrics.

TYPE: LibrarySettings | None

MoSQITo

Settings for MoSQITo metrics.

TYPE: LibrarySettings | None

scikit_maad

Settings for scikit-maad metrics.

TYPE: LibrarySettings | None

default classmethod

default()

Create a default AnalysisSettings object using the package's default configuration file.

RETURNS DESCRIPTION
AnalysisSettings

An instance of AnalysisSettings with default settings.

Source code in soundscapy/audio/analysis_settings.py
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
@classmethod
def default(cls) -> AnalysisSettings:
    """
    Build an AnalysisSettings instance from the packaged default configuration.

    Returns
    -------
    AnalysisSettings
        An instance of AnalysisSettings with default settings.
    """
    # Resolve the YAML file shipped inside the soundscapy.data package.
    default_yaml = resources.files("soundscapy.data") / "default_settings.yaml"
    # as_file() materialises the resource as a real filesystem path,
    # which also works when the package is installed as a zip.
    with resources.as_file(default_yaml) as config_file:
        logger.info(f"Loading default configuration from {config_file}")
        return cls.from_yaml(config_file)

from_dict classmethod

from_dict(d)

Create an AnalysisSettings object from a dictionary.

PARAMETER DESCRIPTION
d

Dictionary containing the configuration settings.

TYPE: dict

RETURNS DESCRIPTION
AnalysisSettings

An instance of AnalysisSettings.

Source code in soundscapy/audio/analysis_settings.py
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
@classmethod
def from_dict(cls, d: dict) -> AnalysisSettings:
    """
    Construct an AnalysisSettings instance from a plain dictionary.

    Parameters
    ----------
    d : dict
        Dictionary containing the configuration settings.

    Returns
    -------
    AnalysisSettings
        An instance of AnalysisSettings.
    """
    # Field validation happens as part of normal model construction.
    return cls(**d)

from_yaml classmethod

from_yaml(filepath)

Create an AnalysisSettings object from a YAML file.

PARAMETER DESCRIPTION
filepath

Path to the YAML configuration file.

TYPE: str | Path

RETURNS DESCRIPTION
AnalysisSettings

An instance of AnalysisSettings.

Source code in soundscapy/audio/analysis_settings.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
@classmethod
def from_yaml(cls, filepath: str | Path) -> AnalysisSettings:
    """
    Create an AnalysisSettings object from a YAML file.

    Parameters
    ----------
    filepath : str | Path
        Path to the YAML configuration file.

    Returns
    -------
    AnalysisSettings
        An instance of AnalysisSettings.
    """
    logger.info(f"Loading configuration from {filepath}")
    # Explicit encoding so config files parse identically on every platform;
    # the locale default (e.g. cp1252 on Windows) would misread non-ASCII.
    with open(filepath, "r", encoding="utf-8") as f:
        config_dict = yaml.safe_load(f)
    return cls(**config_dict)

get_enabled_metrics

get_enabled_metrics()

Get a dictionary of enabled metrics.

RETURNS DESCRIPTION
dict[str, dict[str, MetricSettings]]

A dictionary of enabled metrics grouped by library.

Source code in soundscapy/audio/analysis_settings.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
def get_enabled_metrics(self) -> dict[str, dict[str, MetricSettings]]:
    """
    Collect every metric whose ``run`` flag is set, grouped by library.

    Returns
    -------
    dict[str, dict[str, MetricSettings]]
        A dictionary of enabled metrics grouped by library.
    """
    libraries = ("PythonAcoustics", "MoSQITo", "scikit_maad")
    enabled_metrics: dict[str, dict[str, MetricSettings]] = {}
    for library_name in libraries:
        settings_group = getattr(self, library_name)
        if not settings_group:
            # Library not configured at all -> no entry in the result.
            continue
        active = {}
        for metric_name, metric_conf in settings_group.root.items():
            if metric_conf.run:
                active[metric_name] = metric_conf
        enabled_metrics[library_name] = active
    logger.debug(f"Enabled metrics: {enabled_metrics}")
    return enabled_metrics

get_metric_settings

get_metric_settings(library, metric)

Get the settings for a specific metric.

PARAMETER DESCRIPTION
library

The name of the library.

TYPE: str

metric

The name of the metric.

TYPE: str

RETURNS DESCRIPTION
MetricSettings

The settings for the specified metric.

RAISES DESCRIPTION
KeyError

If the specified library or metric is not found.

Source code in soundscapy/audio/analysis_settings.py
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def get_metric_settings(self, library: str, metric: str) -> MetricSettings:
    """
    Look up the settings for one metric within one library.

    Parameters
    ----------
    library : str
        The name of the library.
    metric : str
        The name of the metric.

    Returns
    -------
    MetricSettings
        The settings for the specified metric.

    Raises
    ------
    KeyError
        If the specified library or metric is not found.
    """
    lib = getattr(self, library)
    # Guard clause: bail out early on a missing library or metric.
    if not lib or metric not in lib.root:
        logger.error(f"Metric '{metric}' not found in library '{library}'")
        raise KeyError(f"Metric '{metric}' not found in library '{library}'")
    return lib.root[metric]

to_yaml

to_yaml(filepath)

Save the current settings to a YAML file.

PARAMETER DESCRIPTION
filepath

Path to save the YAML file.

TYPE: str | Path

Source code in soundscapy/audio/analysis_settings.py
176
177
178
179
180
181
182
183
184
185
186
187
def to_yaml(self, filepath: str | Path) -> None:
    """
    Save the current settings to a YAML file.

    Parameters
    ----------
    filepath : str | Path
        Path to save the YAML file.
    """
    logger.info(f"Saving configuration to {filepath}")
    # Explicit encoding keeps the output portable; the locale default
    # (e.g. cp1252 on Windows) can fail or corrupt non-ASCII labels.
    with open(filepath, "w", encoding="utf-8") as f:
        # by_alias=True writes field aliases so the file round-trips
        # through from_yaml/model construction.
        yaml.dump(self.model_dump(by_alias=True), f)

update_setting

update_setting(library, metric, **kwargs)

Update the settings for a specific metric.

PARAMETER DESCRIPTION
library

The name of the library.

TYPE: str

metric

The name of the metric.

TYPE: str

**kwargs

Keyword arguments to update the metric settings.

DEFAULT: {}

RAISES DESCRIPTION
KeyError

If the specified library or metric is not found.

Source code in soundscapy/audio/analysis_settings.py
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
def update_setting(self, library: str, metric: str, **kwargs) -> None:
    """
    Update the settings for a specific metric.

    Parameters
    ----------
    library : str
        The name of the library.
    metric : str
        The name of the metric.
    **kwargs
        Keyword arguments to update the metric settings.

    Raises
    ------
    KeyError
        If the specified library or metric is not found.
    """
    lib = getattr(self, library)
    # Guard clause: unknown library/metric is a hard error.
    if not lib or metric not in lib.root:
        logger.error(f"Metric '{metric}' not found in library '{library}'")
        raise KeyError(f"Metric '{metric}' not found in library '{library}'")
    target = lib.root[metric]
    for name, value in kwargs.items():
        # Unknown keys are reported but do not abort the remaining updates.
        if not hasattr(target, name):
            logger.error(f"Invalid setting '{name}' for metric '{metric}'")
            continue
        setattr(target, name, value)

validate_library_settings classmethod

validate_library_settings(v)

Validate library settings.

Source code in soundscapy/audio/analysis_settings.py
114
115
116
117
118
119
120
@field_validator("*", mode="before")
@classmethod
def validate_library_settings(cls, v):
    """Coerce raw dicts into LibrarySettings; pass other values through."""
    return LibrarySettings(root=v) if isinstance(v, dict) else v

ConfigManager

ConfigManager(config_path=None)

Manage configuration settings for audio analysis.

PARAMETER DESCRIPTION
config_path

Path to the default configuration file.

TYPE: str | Path | None

Source code in soundscapy/audio/analysis_settings.py
278
279
280
def __init__(self, config_path: str | Path | None = None):
    """
    Initialise the manager, optionally remembering a configuration path.

    Parameters
    ----------
    config_path : str | Path | None, optional
        Path to a configuration file to use when ``load_config`` is later
        called without an explicit path. Default is None.
    """
    self.config_path = Path(config_path) if config_path else None
    # No configuration is loaded until load_config() is called.
    self.current_config: AnalysisSettings | None = None

generate_minimal_config

generate_minimal_config()

Generate a minimal configuration containing only changes from the default.

RETURNS DESCRIPTION
dict

A dictionary containing the minimal configuration.

RAISES DESCRIPTION
ValueError

If no current configuration is loaded.

Source code in soundscapy/audio/analysis_settings.py
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
def generate_minimal_config(self) -> dict:
    """
    Generate a minimal configuration containing only changes from the default.

    Returns
    -------
    dict
        A dictionary containing the minimal configuration.

    Raises
    ------
    ValueError
        If no current configuration is loaded.
    """
    if not self.current_config:
        raise ValueError("No current configuration loaded.")
    # Diff the loaded settings against the packaged defaults so only
    # user-modified values remain.
    return self._get_diff(
        self.current_config.model_dump(),
        AnalysisSettings.default().model_dump(),
    )

load_config

load_config(config_path=None)

Load a configuration file or use the default configuration.

PARAMETER DESCRIPTION
config_path

Path to the configuration file. If None, uses the default configuration.

TYPE: str | Path | None DEFAULT: None

RETURNS DESCRIPTION
AnalysisSettings

The loaded configuration.

Source code in soundscapy/audio/analysis_settings.py
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
def load_config(self, config_path: str | Path | None = None) -> AnalysisSettings:
    """
    Load a configuration file or use the default configuration.

    Parameters
    ----------
    config_path : str | Path | None, optional
        Path to the configuration file. If None, uses the default configuration.

    Returns
    -------
    AnalysisSettings
        The loaded configuration.
    """
    # Precedence: explicit argument, then the path given at construction,
    # then the packaged defaults.
    path = config_path or self.config_path
    if path:
        logger.info(f"Loading configuration from {path}")
        self.current_config = AnalysisSettings.from_yaml(path)
    else:
        logger.info("Loading default configuration")
        self.current_config = AnalysisSettings.default()
    return self.current_config

merge_configs

merge_configs(override_config)

Merge the current configuration with override values and update the current_config.

PARAMETER DESCRIPTION
override_config

Dictionary containing override configuration values.

TYPE: dict

RETURNS DESCRIPTION
AnalysisSettings

The merged configuration.

RAISES DESCRIPTION
ValueError

If no base configuration is loaded.

Source code in soundscapy/audio/analysis_settings.py
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def merge_configs(self, override_config: Dict) -> AnalysisSettings:
    """
    Merge the current configuration with override values and update the current_config.

    Parameters
    ----------
    override_config : dict
        Dictionary containing override configuration values.

    Returns
    -------
    AnalysisSettings
        The merged configuration.

    Raises
    ------
    ValueError
        If no base configuration is loaded.
    """
    if not self.current_config:
        logger.error("No base configuration loaded")
        raise ValueError("No base configuration loaded.")
    logger.info("Merging configurations")
    base = self.current_config.model_dump()
    # Recursively overlay the override values onto the base settings.
    self._deep_update(base, override_config)
    # Re-validate the merged dict through normal model construction.
    self.current_config = AnalysisSettings(**base)
    return self.current_config

save_config

save_config(filepath)

Save the current configuration to a file.

PARAMETER DESCRIPTION
filepath

Path to save the configuration file.

TYPE: str | Path

RAISES DESCRIPTION
ValueError

If no current configuration is loaded.

Source code in soundscapy/audio/analysis_settings.py
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
def save_config(self, filepath: str | Path) -> None:
    """
    Save the current configuration to a file.

    Parameters
    ----------
    filepath : str | Path
        Path to save the configuration file.

    Raises
    ------
    ValueError
        If no current configuration is loaded.
    """
    # Guard clause: nothing loaded means nothing to write.
    if not self.current_config:
        logger.error("No current configuration to save")
        raise ValueError("No current configuration to save.")
    logger.info(f"Saving configuration to {filepath}")
    self.current_config.to_yaml(filepath)

LibrarySettings

Bases: RootModel

Settings for a library of metrics.

get_metric_settings

get_metric_settings(metric)

Get the settings for a specific metric.

PARAMETER DESCRIPTION
metric

The name of the metric.

TYPE: str

RETURNS DESCRIPTION
MetricSettings

The settings for the specified metric.

RAISES DESCRIPTION
KeyError

If the specified metric is not found.

Source code in soundscapy/audio/analysis_settings.py
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
def get_metric_settings(self, metric: str) -> MetricSettings:
    """
    Get the settings for a specific metric.

    Parameters
    ----------
    metric : str
        The name of the metric.

    Returns
    -------
    MetricSettings
        The settings for the specified metric.

    Raises
    ------
    KeyError
        If the specified metric is not found.
    """
    # Guard clause: report and raise on an unknown metric name.
    if metric not in self.root:
        logger.error(f"Metric '{metric}' not found in library")
        raise KeyError(f"Metric '{metric}' not found in library")
    return self.root[metric]

MetricSettings

Bases: BaseModel

Settings for an individual metric.

PARAMETER DESCRIPTION
run

Whether to run this metric.

TYPE: bool

main

The main statistic to calculate.

TYPE: str | int | None

statistics

List of statistics to calculate.

TYPE: list[str | int] | None

channel

List of channels to analyze.

TYPE: list[str]

label

Label for the metric.

TYPE: str

parallel

Whether to run the metric in parallel.

TYPE: bool

func_args

Additional arguments for the metric function.

TYPE: dict[str, Any]

check_main_in_statistics classmethod

check_main_in_statistics(values)

Check that the main statistic is in the statistics list.

Source code in soundscapy/audio/analysis_settings.py
49
50
51
52
53
54
55
56
57
58
@model_validator(mode="before")
@classmethod
def check_main_in_statistics(cls, values):
    """
    Ensure the main statistic is always included in the statistics list.

    Parameters
    ----------
    values : dict
        Raw field values passed to the model before validation.

    Returns
    -------
    dict
        The values, with ``statistics`` guaranteed to contain ``main``
        whenever ``main`` is set.
    """
    main = values.get("main")
    # An explicit ``statistics: None`` in the config would otherwise crash
    # on the membership test / append below; treat it as an empty list.
    statistics = values.get("statistics") or []
    if main and main not in statistics:
        # Build a new list rather than appending in place, so a caller's
        # list (or tuple) is never mutated as a side effect of validation.
        values["statistics"] = [*statistics, main]
    return values

Binaural Metrics

soundscapy.audio.binaural

This module provides tools for working with binaural audio signals.

The main class, Binaural, extends the Signal class from the acoustics library to provide specialized functionality for binaural recordings. It supports various psychoacoustic metrics and analysis techniques using libraries such as mosqito, maad, and python-acoustics.

CLASS DESCRIPTION
Binaural : A class for processing and analyzing binaural audio signals.
Notes

This module requires the following external libraries: acoustics, mosqito, maad, and python-acoustics.

Examples:

>>> # xdoctest: +SKIP
>>> from soundscapy.audio import Binaural
>>> signal = Binaural.from_wav("audio.wav")
>>> results = signal.process_all_metrics(analysis_settings)

Binaural

Bases: Signal

A class for processing and analyzing binaural audio signals.

This class extends the Signal class from the acoustics library to provide specialized functionality for binaural recordings. It supports various psychoacoustic metrics and analysis techniques using libraries such as mosqito, maad, and python-acoustics.

ATTRIBUTE DESCRIPTION
fs

Sampling frequency of the signal.

TYPE: float

recording

Name or identifier of the recording.

TYPE: str

Notes

This class only supports 2-channel (stereo) audio signals.

__array_finalize__

__array_finalize__(obj)

Finalize the new Binaural object.

This method is called for all new Binaural objects.

PARAMETER DESCRIPTION
obj

The object from which the new object was created.

TYPE: Binaural or None

Source code in soundscapy/audio/binaural.py
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
def __array_finalize__(self, obj):
    """
    Propagate Binaural attributes when numpy derives a new array.

    This method is called for all new Binaural objects.

    Parameters
    ----------
    obj : Binaural or None
        The object from which the new object was created.
    """
    if obj is None:
        # Explicit construction path; __new__ sets the attributes itself.
        return
    # Views and slices inherit the sample rate and recording name;
    # arrays without them fall back to None.
    for attr in ("fs", "recording"):
        setattr(self, attr, getattr(obj, attr, None))

__new__

__new__(data, fs, recording='Rec')

Create a new Binaural object.

PARAMETER DESCRIPTION
data

The audio data.

TYPE: array_like

fs

Sampling frequency of the signal.

TYPE: float

recording

Name or identifier of the recording. Default is "Rec".

TYPE: str DEFAULT: 'Rec'

RETURNS DESCRIPTION
Binaural

A new Binaural object.

RAISES DESCRIPTION
ValueError

If the input signal is not 2-channel.

Source code in soundscapy/audio/binaural.py
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
def __new__(cls, data, fs, recording="Rec"):
    """
    Create a new Binaural object.

    Parameters
    ----------
    data : array_like
        The audio data.
    fs : float
        Sampling frequency of the signal.
    recording : str, optional
        Name or identifier of the recording. Default is "Rec".

    Returns
    -------
    Binaural
        A new Binaural object.

    Raises
    ------
    ValueError
        If the input signal is not 2-channel.
    """
    instance = super().__new__(cls, data, fs).view(cls)
    instance.recording = recording
    # Binaural processing requires exactly two channels (left/right).
    n_channels = instance.channels
    if n_channels != 2:
        logger.error(
            f"Attempted to create Binaural object with {n_channels} channels"
        )
        raise ValueError("Binaural class only supports 2 channels.")
    logger.debug(f"Created new Binaural object: {recording}, fs={fs}")
    return instance

calibrate_to

calibrate_to(decibel, inplace=False)

Calibrate the binaural signal to predefined Leq/dB levels.

This method allows calibration of both channels either to the same level or to different levels for each channel.

PARAMETER DESCRIPTION
decibel

Target calibration value(s) in dB (Leq). If a single value is provided, both channels will be calibrated to this level. If two values are provided, they will be applied to the left and right channels respectively.

TYPE: float or List[float] or Tuple[float, float]

inplace

If True, modify the signal in place. If False, return a new calibrated signal. Default is False.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
Binaural

Calibrated Binaural signal. If inplace is True, returns self.

RAISES DESCRIPTION
ValueError

If decibel is not a float, or a list/tuple of two floats.

Examples:

>>> # xdoctest: +SKIP
>>> signal = Binaural.from_wav("audio.wav")
>>> calibrated_signal = signal.calibrate_to([60, 62])  # Calibrate left channel to 60 dB and right to 62 dB
Source code in soundscapy/audio/binaural.py
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def calibrate_to(
    self,
    decibel: Union[float, List[float], Tuple[float, float]],
    inplace: bool = False,
) -> "Binaural":
    """
    Calibrate the binaural signal to predefined Leq/dB levels.

    This method allows calibration of both channels either to the same level
    or to different levels for each channel.

    Parameters
    ----------
    decibel : float or List[float] or Tuple[float, float]
        Target calibration value(s) in dB (Leq).
        If a single value is provided, both channels will be calibrated to this level.
        If two values are provided, they will be applied to the left and right channels respectively.
    inplace : bool, optional
        If True, modify the signal in place. If False, return a new calibrated signal.
        Default is False.

    Returns
    -------
    Binaural
        Calibrated Binaural signal. If inplace is True, returns self.

    Raises
    ------
    ValueError
        If decibel is not a float, or a list/tuple of two floats.

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> signal = Binaural.from_wav("audio.wav")
    >>> calibrated_signal = signal.calibrate_to([60, 62])  # Calibrate left channel to 60 dB and right to 62 dB
    """
    logger.info(f"Calibrating Binaural signal to {decibel} dB")
    if isinstance(decibel, (np.ndarray, pd.Series)):  # Force into tuple
        decibel = tuple(decibel)
    if isinstance(decibel, (list, tuple)):
        if len(decibel) == 2:  # Per-channel calibration (recommended)
            logger.debug(
                f"Calibrating channels separately: Left={decibel[0]}dB, Right={decibel[1]}dB"
            )
            decibel = np.array(decibel)
            # Trailing axis turns the (2,) levels into shape (2, 1) so each
            # level broadcasts across its own channel's samples in the
            # parent class's calibrate_to.
            decibel = decibel[..., None]
            return super().calibrate_to(decibel, inplace)
        elif (
            len(decibel) == 1
        ):  # if one value given in tuple, assume same for both channels
            logger.debug(f"Calibrating both channels to {decibel[0]}dB")
            # Unwrap to a scalar and fall through to the scalar branch below.
            decibel = decibel[0]
        else:
            logger.error(f"Invalid calibration value: {decibel}")
            raise ValueError(
                "decibel must either be a single value or a 2 value tuple"
            )
    if isinstance(decibel, (int, float)):  # Calibrate both channels to same value
        logger.debug(f"Calibrating both channels to {decibel}dB")
        return super().calibrate_to(decibel, inplace)
    else:
        # Reached only for non-numeric scalars (e.g. a string).
        logger.error(f"Invalid calibration value: {decibel}")
        raise ValueError("decibel must be a single value or a 2 value tuple")

from_wav classmethod

from_wav(filename, calibrate_to=None, normalize=False, resample=None)

Load a wav file and return a Binaural object.

Overrides the Signal.from_wav method to return a Binaural object instead of a Signal object.

PARAMETER DESCRIPTION
filename

Filename of wav file to load.

TYPE: Path or str

calibrate_to

Value(s) to calibrate to in dB (Leq). Can also handle np.ndarray and pd.Series of length 2. If only one value is passed, will calibrate both channels to the same value.

TYPE: float or List or Tuple DEFAULT: None

normalize

Whether to normalize the signal. Default is False.

TYPE: bool DEFAULT: False

resample

New sampling frequency to resample the signal to. Default is None

TYPE: int DEFAULT: None

RETURNS DESCRIPTION
Binaural

Binaural signal object of wav recording.

See Also

acoustics.Signal.from_wav : Base method for loading wav files.

Source code in soundscapy/audio/binaural.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
@classmethod
def from_wav(
    cls,
    filename: Union[Path, str],
    calibrate_to: Optional[Union[float, List, Tuple]] = None,
    normalize: bool = False,
    resample: Optional[int] = None,
) -> "Binaural":
    """
    Load a wav file and return a Binaural object.

    Overrides the Signal.from_wav method to return a
    Binaural object instead of a Signal object.

    Parameters
    ----------
    filename : Path or str
        Filename of wav file to load.
    calibrate_to : float or List or Tuple, optional
        Value(s) to calibrate to in dB (Leq).
        Can also handle np.ndarray and pd.Series of length 2.
        If only one value is passed, will calibrate both channels to the same value.
    normalize : bool, optional
        Whether to normalize the signal. Default is False.
    resample : int, optional
        New sampling frequency to resample the signal to. Default is None

    Returns
    -------
    Binaural
        Binaural signal object of wav recording.

    See Also
    --------
    acoustics.Signal.from_wav : Base method for loading wav files.
    """
    # Log the actual path (previous message contained a dead placeholder).
    logger.info(f"Loading WAV file: {filename}")
    s = super().from_wav(filename, normalize)
    # The file stem becomes the recording identifier.
    b = cls(s, s.fs, recording=Path(filename).stem)
    if calibrate_to is not None:
        logger.info(f"Calibrating loaded signal to {calibrate_to} dB")
        b.calibrate_to(calibrate_to, inplace=True)
    if resample is not None:
        logger.debug(f"Resampling loaded signal to {resample} Hz")
        b = b.fs_resample(resample)
    return b

fs_resample

fs_resample(fs)

Resample the signal to a new sampling frequency.

PARAMETER DESCRIPTION
fs

New sampling frequency.

TYPE: float

RETURNS DESCRIPTION
Binaural

Resampled Binaural signal. If inplace is True, returns self.

See Also

acoustics.Signal.resample : Base method for resampling signals.

Source code in soundscapy/audio/binaural.py
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
def fs_resample(
    self,
    fs: float,
) -> "Binaural":
    """
    Resample the signal to a new sampling frequency.

    Parameters
    ----------
    fs : float
        New sampling frequency.

    Returns
    -------
    Binaural
        Resampled Binaural signal. If the signal is already at the
        requested rate, ``self`` is returned unchanged (not a copy).

    See Also
    --------
    acoustics.Signal.resample : Base method for resampling signals.
    """
    if fs == self.fs:
        logger.info(f"Signal already at {fs} Hz. No resampling needed.")
        return self
    logger.info(f"Resampling signal to {fs} Hz")
    # Resample each channel independently; the new sample count preserves
    # the duration: new_len = old_len * (new_fs / old_fs).
    resampled_channels = [
        scipy.signal.resample(channel, int(fs * len(channel) / self.fs))
        for channel in self
    ]
    resampled_channels = np.stack(resampled_channels)
    return Binaural(resampled_channels, fs, recording=self.recording)

maad_metric

maad_metric(metric, channel=('Left', 'Right'), as_df=True, metric_settings=None, func_args={})

Run a metric from the scikit-maad library.

Currently only supports running all of the alpha indices at once.

PARAMETER DESCRIPTION
metric

The metric to run.

TYPE: (all_temporal_alpha_indices, all_spectral_alpha_indices) DEFAULT: "all_temporal_alpha_indices"

channel

Which channels to process. Default is ("Left", "Right").

TYPE: (tuple, list or str) DEFAULT: ('Left', 'Right')

as_df

Whether to return a dataframe or not. Default is True. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: True

metric_settings

Settings for metric analysis. Default is None.

TYPE: MetricSettings DEFAULT: None

func_args

Additional arguments to pass to the underlying scikit-maad method.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If metric name is not recognised.

See Also

metrics.maad_metric_1ch metrics.maad_metric_2ch

Source code in soundscapy/audio/binaural.py
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
def maad_metric(
    self,
    metric: str,
    channel: Union[int, Tuple, List, str] = ("Left", "Right"),
    as_df: bool = True,
    metric_settings: Optional[MetricSettings] = None,
    func_args: Optional[Dict] = None,
) -> Optional[Union[Dict, pd.DataFrame]]:
    """
    Run a metric from the scikit-maad library.

    Currently only supports running all of the alpha indices at once.

    Parameters
    ----------
    metric : {"all_temporal_alpha_indices", "all_spectral_alpha_indices"}
        The metric to run.
    channel : tuple, list or str, optional
        Which channels to process. Default is ("Left", "Right").
    as_df : bool, optional
        Whether to return a dataframe or not. Default is True.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    metric_settings : MetricSettings, optional
        Settings for metric analysis. Default is None.
    func_args : dict, optional
        Additional arguments to pass to the underlying scikit-maad method.
        Default is None, treated as an empty dict.

    Returns
    -------
    dict or pd.DataFrame or None
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.
        None when the metric is disabled in the provided settings.

    Raises
    ------
    ValueError
        If metric name is not recognised.

    See Also
    --------
    metrics.maad_metric_1ch
    metrics.maad_metric_2ch
    """
    # Normalise the sentinel to a fresh dict; a literal {} default would be
    # shared between calls (mutable-default-argument pitfall).
    if func_args is None:
        func_args = {}
    logger.info(f"Running maad metric: {metric}")
    if metric_settings:
        logger.debug("Using provided analysis settings")
        if metric not in {
            "all_temporal_alpha_indices",
            "all_spectral_alpha_indices",
        }:
            logger.error(f"Invalid maad metric: {metric}")
            raise ValueError(f"Metric {metric} not recognised")

        if not metric_settings.run:
            logger.info(f"Metric {metric} is disabled in analysis settings")
            return None

        # Settings override whatever channel selection was passed in.
        channel = metric_settings.channel
    channel = ("Left", "Right") if channel is None else channel
    s = self._get_channel(channel)
    if s.channels == 1:
        logger.debug("Processing single channel")
        return maad_metric_1ch(s, metric, as_df)
    else:
        logger.debug("Processing both channels")
        return maad_metric_2ch(s, metric, channel, as_df, func_args)

mosqito_metric

mosqito_metric(metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, channel=('Left', 'Right'), as_df=True, return_time_series=False, parallel=True, metric_settings=None, func_args={})

Run a metric from the mosqito library.

PARAMETER DESCRIPTION
metric

Metric to run from mosqito library.

TYPE: (loudness_zwtv, sharpness_din_from_loudness, sharpness_din_perseg, sharpness_tv, roughness_dw) DEFAULT: "loudness_zwtv"

statistics

List of level statistics to calculate (e.g. L_5, L_90, etc.). Default is (5, 10, 50, 90, 95, "avg", "max", "min", "kurt", "skew").

TYPE: tuple or list DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric. If None, will pull from default label for that metric.

TYPE: str DEFAULT: None

channel

Which channels to process. Default is ("Left", "Right").

TYPE: tuple or list of str or str DEFAULT: ('Left', 'Right')

as_df

Whether to return a dataframe or not. Default is True. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: True

return_time_series

Whether to return the time series of the metric. Default is False. Cannot return time series if as_df is True.

TYPE: bool DEFAULT: False

parallel

Whether to run the channels in parallel. Default is True. If False, will run each channel sequentially.

TYPE: bool DEFAULT: True

metric_settings

Settings for metric analysis. Default is None.

TYPE: MetricSettings DEFAULT: None

func_args

Any settings given here will override those in the other options. Can pass any *args or **kwargs to the underlying python acoustics method.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

See Also

binaural.mosqito_metric_2ch : Method for running metrics on 2 channels. binaural.mosqito_metric_1ch : Method for running metrics on 1 channel.

Source code in soundscapy/audio/binaural.py
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
def mosqito_metric(
    self,
    metric: str,
    statistics: Union[Tuple, List] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: Optional[str] = None,
    channel: Union[int, Tuple, List, str] = ("Left", "Right"),
    as_df: bool = True,
    return_time_series: bool = False,
    parallel: bool = True,
    metric_settings: Optional[MetricSettings] = None,
    func_args: Optional[Dict] = None,
) -> Union[Dict, pd.DataFrame, None]:
    """
    Run a metric from the MoSQITo library.

    Parameters
    ----------
    metric : {"loudness_zwtv", "sharpness_din_from_loudness", "sharpness_din_perseg", "sharpness_din_tv", "roughness_dw"}
        Metric to run from the MoSQITo library.
    statistics : tuple or list, optional
        List of level statistics to calculate (e.g. L_5, L_90, etc.).
        Default is (5, 10, 50, 90, 95, "avg", "max", "min", "kurt", "skew").
    label : str, optional
        Label to use for the metric. If None, will pull from default label for that metric.
    channel : tuple or list of str or str, optional
        Which channels to process. Default is ("Left", "Right").
    as_df : bool, optional
        Whether to return a dataframe or not. Default is True.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    return_time_series : bool, optional
        Whether to return the time series of the metric. Default is False.
        Cannot return time series if as_df is True.
    parallel : bool, optional
        Whether to run the channels in parallel. Default is True.
        If False, will run each channel sequentially.
    metric_settings : MetricSettings, optional
        Settings for metric analysis. Default is None. When provided, its
        channel/statistics/label/parallel/func_args fields override the
        corresponding arguments.
    func_args : dict, optional
        Any settings given here will override those in the other options.
        Can pass any *args or **kwargs to the underlying MoSQITo function.

    Returns
    -------
    dict or pd.DataFrame or None
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.
        Returns None if the metric is disabled via metric_settings.run.

    See Also
    --------
    binaural.mosqito_metric_2ch : Method for running metrics on 2 channels.
    binaural.mosqito_metric_1ch : Method for running metrics on 1 channel.
    """
    logger.info(f"Running mosqito metric: {metric}")
    # Avoid the shared-mutable-default pitfall: never use {} as a default value.
    func_args = {} if func_args is None else func_args
    if metric_settings:
        logger.debug("Using provided analysis settings")
        if not metric_settings.run:
            logger.info(f"Metric {metric} is disabled in analysis settings")
            return None

        # Explicit settings take precedence over the keyword arguments.
        channel = metric_settings.channel
        statistics = metric_settings.statistics
        label = metric_settings.label
        parallel = metric_settings.parallel
        func_args = metric_settings.func_args

    channel = ("Left", "Right") if channel is None else channel
    s = self._get_channel(channel)

    if s.channels == 1:
        logger.debug("Processing single channel")
        return mosqito_metric_1ch(
            s, metric, statistics, label, as_df, return_time_series, func_args
        )
    else:
        logger.debug("Processing both channels")
        return mosqito_metric_2ch(
            s,
            metric,
            statistics,
            label,
            channel,
            as_df,
            return_time_series,
            parallel,
            func_args,
        )

process_all_metrics

process_all_metrics(analysis_settings, parallel=True)

Process all metrics specified in the analysis settings.

This method runs all enabled metrics from the provided AnalysisSettings object and compiles the results into a single DataFrame.

PARAMETER DESCRIPTION
analysis_settings

Configuration object specifying which metrics to run and their parameters.

TYPE: AnalysisSettings

parallel

Whether to run calculations in parallel where possible. Default is True.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
DataFrame

A MultiIndex DataFrame containing the results of all processed metrics. The index includes "Recording" and "Channel" levels.

Notes

The parallel option primarily affects the MoSQITo metrics. Other metrics may not benefit from parallelization.

Examples:

>>> # xdoctest: +SKIP
>>> signal = Binaural.from_wav("audio.wav")
>>> settings = AnalysisSettings.from_yaml("settings.yaml")
>>> results = signal.process_all_metrics(settings)
Source code in soundscapy/audio/binaural.py
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
def process_all_metrics(
    self, analysis_settings: AnalysisSettings, parallel: bool = True
) -> pd.DataFrame:
    """
    Run every metric enabled in the analysis settings and collect the results.

    Thin wrapper that delegates to the module-level ``process_all_metrics``
    function, which executes each enabled metric for this signal and merges
    the outputs into one DataFrame.

    Parameters
    ----------
    analysis_settings : AnalysisSettings
        Configuration describing which metrics to run and with what parameters.
    parallel : bool, optional
        Run calculations in parallel where supported. Default is True.

    Returns
    -------
    pd.DataFrame
        MultiIndex DataFrame with all metric results, indexed by
        ("Recording", "Channel").

    Notes
    -----
    Parallelism mainly benefits the MoSQITo metrics; other metrics may see
    little or no speed-up.

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> signal = Binaural.from_wav("audio.wav")
    >>> settings = AnalysisSettings.from_yaml("settings.yaml")
    >>> results = signal.process_all_metrics(settings)
    """
    logger.info(f"Processing all metrics for {self.recording}")
    logger.debug(f"Parallel processing: {parallel}")
    # All of the actual work happens in the module-level function.
    return process_all_metrics(self, analysis_settings, parallel)

pyacoustics_metric

pyacoustics_metric(metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, channel=('Left', 'Right'), as_df=True, return_time_series=False, metric_settings=None, func_args={})

Run a metric from the python acoustics library.

PARAMETER DESCRIPTION
metric

The metric to run.

TYPE: (LZeq, Leq, LAeq, LCeq, SEL) DEFAULT: "LZeq"

statistics

List of level statistics to calculate (e.g. L_5, L_90, etc.). Default is (5, 10, 50, 90, 95, "avg", "max", "min", "kurt", "skew").

TYPE: tuple or list DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric. If None, will pull from default label for that metric.

TYPE: str DEFAULT: None

channel

Which channels to process. Default is ("Left", "Right").

TYPE: tuple, list, or str DEFAULT: ('Left', 'Right')

as_df

Whether to return a dataframe or not. Default is True. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: True

return_time_series

Whether to return the time series of the metric. Default is False. Cannot return time series if as_df is True.

TYPE: bool DEFAULT: False

metric_settings

Settings for metric analysis. Default is None.

TYPE: MetricSettings DEFAULT: None

func_args

Any settings given here will override those in the other options. Can pass any *args or **kwargs to the underlying python acoustics method.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

See Also

metrics.pyacoustics_metric acoustics.standards_iso_tr_25417_2007.equivalent_sound_pressure_level : Base method for Leq calculation. acoustics.standards.iec_61672_1_2013.sound_exposure_level : Base method for SEL calculation. acoustics.standards.iec_61672_1_2013.time_weighted_sound_level : Base method for Leq level time series calculation.

Source code in soundscapy/audio/binaural.py
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
def pyacoustics_metric(
    self,
    metric: str,
    statistics: Union[Tuple, List] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: Optional[str] = None,
    channel: Union[str, int, List, Tuple] = ("Left", "Right"),
    as_df: bool = True,
    return_time_series: bool = False,
    metric_settings: Optional[MetricSettings] = None,
    func_args: Optional[Dict] = None,
) -> Union[Dict, pd.DataFrame, None]:
    """
    Run a metric from the python acoustics library.

    Parameters
    ----------
    metric : {"LZeq", "Leq", "LAeq", "LCeq", "SEL"}
        The metric to run.
    statistics : tuple or list, optional
        List of level statistics to calculate (e.g. L_5, L_90, etc.).
        Default is (5, 10, 50, 90, 95, "avg", "max", "min", "kurt", "skew").
    label : str, optional
        Label to use for the metric. If None, will pull from default label for that metric.
    channel : tuple, list, or str, optional
        Which channels to process. Default is ("Left", "Right").
    as_df : bool, optional
        Whether to return a dataframe or not. Default is True.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    return_time_series : bool, optional
        Whether to return the time series of the metric. Default is False.
        Cannot return time series if as_df is True.
    metric_settings : MetricSettings, optional
        Settings for metric analysis. Default is None. When provided, its
        channel/statistics/label/func_args fields override the corresponding
        arguments.
    func_args : dict, optional
        Any settings given here will override those in the other options.
        Can pass any *args or **kwargs to the underlying python acoustics method.

    Returns
    -------
    dict or pd.DataFrame or None
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.
        Returns None if the metric is disabled via metric_settings.run.

    See Also
    --------
    metrics.pyacoustics_metric
    acoustics.standards_iso_tr_25417_2007.equivalent_sound_pressure_level : Base method for Leq calculation.
    acoustics.standards.iec_61672_1_2013.sound_exposure_level : Base method for SEL calculation.
    acoustics.standards.iec_61672_1_2013.time_weighted_sound_level : Base method for Leq level time series calculation.
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default value.
    func_args = {} if func_args is None else func_args
    if metric_settings:
        logger.debug("Using provided analysis settings")
        if not metric_settings.run:
            logger.info(f"Metric {metric} is disabled in analysis settings")
            return None

        # Explicit settings take precedence over the keyword arguments.
        channel = metric_settings.channel
        statistics = metric_settings.statistics
        label = metric_settings.label
        func_args = metric_settings.func_args

    channel = ("Left", "Right") if channel is None else channel
    s = self._get_channel(channel)

    if s.channels == 1:
        logger.debug("Processing single channel")
        return pyacoustics_metric_1ch(
            s, metric, statistics, label, as_df, return_time_series, func_args
        )
    else:
        logger.debug("Processing both channels")
        return pyacoustics_metric_2ch(
            s,
            metric,
            statistics,
            label,
            channel,
            as_df,
            return_time_series,
            func_args,
        )

soundscapy.audio.metrics

This module provides functions for calculating various acoustic and psychoacoustic metrics for audio signals. It includes implementations for single-channel and two-channel signals, as well as wrapper functions for different libraries such as python-acoustics, MoSQITo, and scikit-maad.

FUNCTION DESCRIPTION
_stat_calcs : Calculate various statistics for a time series array.
mosqito_metric_1ch : Calculate a MoSQITo psychoacoustic metric for a single channel signal.
maad_metric_1ch : Run a metric from the scikit-maad library on a single channel signal.
pyacoustics_metric_1ch : Run a metric from the pyacoustics library on a single channel object.
pyacoustics_metric_2ch : Run a metric from the python acoustics library on a Binaural object.
mosqito_metric_2ch : Calculate metrics from MoSQITo for a two-channel signal.
maad_metric_2ch : Run a metric from the scikit-maad library on a binaural signal.
prep_multiindex_df : Prepare a MultiIndex dataframe from a dictionary of results.
add_results : Add results to a MultiIndex dataframe.
process_all_metrics : Process all metrics specified in the analysis settings for a binaural signal.
Notes

This module relies on external libraries such as numpy, pandas, maad, mosqito, and scipy. Ensure these dependencies are installed before using this module.

add_results

add_results(results_df, metric_results)

Add results to MultiIndex dataframe.

PARAMETER DESCRIPTION
results_df

MultiIndex dataframe to add results to.

TYPE: DataFrame

metric_results

MultiIndex dataframe of results to add.

TYPE: DataFrame

RETURNS DESCRIPTION
DataFrame

Index includes "Recording" and "Channel" with a column for each index.

RAISES DESCRIPTION
ValueError

If the input DataFrames are not in the expected format.

Source code in soundscapy/audio/metrics.py
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
def add_results(results_df: pd.DataFrame, metric_results: pd.DataFrame):
    """
    Merge a batch of metric results into an existing MultiIndex dataframe.

    If every result column already exists in ``results_df``, values are
    updated in place; otherwise the new columns are attached with a join.

    Parameters
    ----------
    results_df : pd.DataFrame
        MultiIndex dataframe to add results to.
    metric_results : pd.DataFrame
        MultiIndex dataframe of results to add.

    Returns
    -------
    pd.DataFrame
        Index includes "Recording" and "Channel" with a column for each index.

    Raises
    ------
    ValueError
        If the input DataFrames are not in the expected format.
    """
    logger.info("Adding results to MultiIndex DataFrame")
    try:
        # TODO: Add check for whether all of the recordings have rows in the dataframe
        # If not, add new rows first

        incoming = set(metric_results.columns)
        existing = set(results_df.columns)
        if incoming.issubset(existing):
            # Every column is already present — overwrite values in place.
            results_df.update(metric_results, errors="ignore")
        else:
            # At least one brand-new column — attach it via a join.
            results_df = results_df.join(metric_results)
        logger.debug("Results added successfully")
        return results_df
    except Exception as e:
        logger.error(f"Error adding results to DataFrame: {str(e)}")
        raise ValueError("Invalid input DataFrame format") from e

maad_metric_1ch

maad_metric_1ch(s, metric, as_df=False, func_args={})

Run a metric from the scikit-maad library (or suite of indices) on a single channel signal.

Currently only supports running all of the alpha indices at once.

PARAMETER DESCRIPTION
s

Single channel signal to calculate the alpha indices for.

TYPE: Signal or Binaural (single channel)

metric

Metric to calculate.

TYPE: (all_temporal_alpha_indices, all_spectral_alpha_indices) DEFAULT: "all_temporal_alpha_indices"

as_df

Whether to return a pandas DataFrame, by default False. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: False

func_args

Additional keyword arguments to pass to the metric function, by default {}.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If the signal is not single-channel or if an unrecognized metric is specified.

See Also

maad.features.all_spectral_alpha_indices maad.features.all_temporal_alpha_indices

Source code in soundscapy/audio/metrics.py
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
def maad_metric_1ch(
    s, metric: str, as_df: bool = False, func_args: Optional[Dict] = None
):
    """
    Run a metric from the scikit-maad library (or suite of indices) on a single channel signal.

    Currently only supports running all of the alpha indices at once.

    Parameters
    ----------
    s : Signal or Binaural (single channel)
        Single channel signal to calculate the alpha indices for.
    metric : {"all_temporal_alpha_indices", "all_spectral_alpha_indices"}
        Metric to calculate.
    as_df : bool, optional
        Whether to return a pandas DataFrame, by default False.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    func_args : dict, optional
        Additional keyword arguments to pass to the metric function, by default None
        (treated as an empty dict).

    Returns
    -------
    dict or pd.DataFrame
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.

    Raises
    ------
    ValueError
        If the signal is not single-channel or if an unrecognized metric is specified.

    See Also
    --------
    maad.features.all_spectral_alpha_indices
    maad.features.all_temporal_alpha_indices
    """
    logger.debug(f"Calculating MAAD metric: {metric}")

    # Avoid the shared-mutable-default pitfall: never use {} as a default value.
    func_args = {} if func_args is None else func_args

    # Checks and status
    if s.channels != 1:
        logger.error("Signal must be single channel")
        raise ValueError("Signal must be single channel")

    logger.debug(f"Calculating scikit-maad {metric}")

    # Start the calc
    try:
        if metric == "all_spectral_alpha_indices":
            # NOTE(review): func_args is forwarded to both spectrogram() and the
            # indices call, so any kwargs must be valid for both functions.
            Sxx, tn, fn, ext = spectrogram(s, s.fs, **func_args)
            res = all_spectral_alpha_indices(Sxx, tn, fn, extent=ext, **func_args)[0]
        elif metric == "all_temporal_alpha_indices":
            res = all_temporal_alpha_indices(s, s.fs, **func_args)
        else:
            logger.error(f"Metric {metric} not recognized")
            raise ValueError(f"Metric {metric} not recognized.")
    except Exception as e:
        logger.error(f"Error calculating {metric}: {str(e)}")
        raise

    if not as_df:
        return res.to_dict("records")[0]
    try:
        res["Recording"] = s.recording
        res.set_index(["Recording"], inplace=True)
        return res
    except AttributeError:
        # Signal has no .recording attribute — return the unindexed frame.
        return res

maad_metric_2ch

maad_metric_2ch(b, metric, channel_names=('Left', 'Right'), as_df=False, func_args={})

Run a metric from the scikit-maad library (or suite of indices) on a binaural signal.

Currently only supports running all the alpha indices at once.

PARAMETER DESCRIPTION
b

Binaural signal to calculate the alpha indices for.

TYPE: Binaural

metric

Metric to calculate.

TYPE: (all_temporal_alpha_indices, all_spectral_alpha_indices) DEFAULT: "all_temporal_alpha_indices"

channel_names

Custom names for the channels, by default ("Left", "Right").

TYPE: tuple of str DEFAULT: ('Left', 'Right')

as_df

Whether to return a pandas DataFrame, by default False. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: False

func_args

Additional arguments to pass to the metric function, by default {}.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If the input signal is not 2-channel or if an unrecognized metric is specified.

See Also

scikit-maad library maad_metric_1ch

Source code in soundscapy/audio/metrics.py
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
def maad_metric_2ch(
    b,
    metric: str,
    channel_names: Tuple[str, str] = ("Left", "Right"),
    as_df: bool = False,
    func_args: Optional[Dict] = None,
):
    """
    Run a metric from the scikit-maad library (or suite of indices) on a binaural signal.

    Currently only supports running all the alpha indices at once.

    Parameters
    ----------
    b : Binaural
        Binaural signal to calculate the alpha indices for.
    metric : {"all_temporal_alpha_indices", "all_spectral_alpha_indices"}
        Metric to calculate.
    channel_names : tuple of str, optional
        Custom names for the channels, by default ("Left", "Right").
    as_df : bool, optional
        Whether to return a pandas DataFrame, by default False.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    func_args : dict, optional
        Additional arguments to pass to the metric function, by default None
        (treated as an empty dict).

    Returns
    -------
    dict or pd.DataFrame
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.

    Raises
    ------
    ValueError
        If the input signal is not 2-channel or if an unrecognized metric is specified.

    See Also
    --------
    scikit-maad library
    maad_metric_1ch
    """
    logger.debug(f"Calculating MAAD metric for 2 channels: {metric}")

    # Avoid the shared-mutable-default pitfall: never use {} as a default value.
    func_args = {} if func_args is None else func_args

    if b.channels != 2:
        logger.error("Must be 2 channel signal. Use `maad_metric_1ch` instead.")
        raise ValueError("Must be 2 channel signal. Use `maad_metric_1ch` instead.")

    logger.debug(f"Calculating scikit-maad {metric}")

    try:
        # Bug fix: func_args was previously accepted but never forwarded to the
        # per-channel calls, silently ignoring caller-supplied arguments.
        res_l = maad_metric_1ch(b[0], metric, as_df=False, func_args=func_args)
        res_r = maad_metric_1ch(b[1], metric, as_df=False, func_args=func_args)
        res = {channel_names[0]: res_l, channel_names[1]: res_r}
    except Exception as e:
        logger.error(f"Error calculating MAAD metric {metric} for 2 channels: {str(e)}")
        raise

    if not as_df:
        return res
    try:
        rec = b.recording
    except AttributeError:
        # Signal has no .recording attribute — fall back to a placeholder index.
        rec = 0
    df = pd.DataFrame.from_dict(res, orient="index")
    df["Recording"] = rec
    df["Channel"] = df.index
    df.set_index(["Recording", "Channel"], inplace=True)
    return df

mosqito_metric_1ch

mosqito_metric_1ch(s, metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, as_df=False, return_time_series=False, func_args={})

Calculate a MoSQITo psychoacoustic metric for a single channel signal.

PARAMETER DESCRIPTION
s

Single channel signal object to analyze.

TYPE: Signal

metric

Name of the metric to calculate. Options are "loudness_zwtv", "roughness_dw", "sharpness_din_from_loudness", "sharpness_din_perseg", or "sharpness_din_tv".

TYPE: str

statistics

Statistics to calculate on the metric results.

TYPE: Tuple[Union[int, str], ...] DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric in the results. If None, uses a default label.

TYPE: str DEFAULT: None

as_df

If True, return results as a pandas DataFrame. Otherwise, return a dictionary.

TYPE: bool DEFAULT: False

return_time_series

If True, include the full time series in the results.

TYPE: bool DEFAULT: False

func_args

Additional arguments to pass to the underlying MoSQITo function.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
Union[dict, DataFrame]

Results of the metric calculation and statistics.

RAISES DESCRIPTION
ValueError

If the input signal is not single-channel or if an unrecognized metric is specified.

Examples:

>>> # xdoctest: +SKIP
>>> from soundscapy.audio import Binaural
>>> signal = Binaural.from_wav("audio.wav", resample=480000)
>>> results = mosqito_metric_1ch(signal[0], "loudness_zwtv", as_df=True)
Source code in soundscapy/audio/metrics.py
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
def mosqito_metric_1ch(
    s,
    metric: str,
    statistics: Tuple[Union[int, str], ...] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: Optional[str] = None,
    as_df: bool = False,
    return_time_series: bool = False,
    func_args: Optional[Dict] = None,
) -> Union[Dict, pd.DataFrame]:
    """
    Calculate a MoSQITo psychoacoustic metric for a single channel signal.

    Parameters
    ----------
    s : Signal
        Single channel signal object to analyze.
    metric : str
        Name of the metric to calculate. Options are "loudness_zwtv",
        "roughness_dw", "sharpness_din_from_loudness", "sharpness_din_perseg",
        or "sharpness_din_tv".
    statistics : Tuple[Union[int, str], ...], optional
        Statistics to calculate on the metric results.
    label : str, optional
        Label to use for the metric in the results. If None, uses a default label.
    as_df : bool, optional
        If True, return results as a pandas DataFrame. Otherwise, return a dictionary.
    return_time_series : bool, optional
        If True, include the full time series in the results.
        Ignored (with a warning) when as_df is True.
    func_args : dict, optional
        Additional arguments to pass to the underlying MoSQITo function,
        by default None (treated as an empty dict).

    Returns
    -------
    Union[dict, pd.DataFrame]
        Results of the metric calculation and statistics.

    Raises
    ------
    ValueError
        If the input signal is not single-channel or if an unrecognized metric is specified.

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> from soundscapy.audio import Binaural
    >>> signal = Binaural.from_wav("audio.wav", resample=480000)
    >>> results = mosqito_metric_1ch(signal[0], "loudness_zwtv", as_df=True)
    """
    logger.debug(f"Calculating MoSQITo metric: {metric}")

    # Avoid the shared-mutable-default pitfall: never use {} as a default value.
    func_args = {} if func_args is None else func_args

    # Checks and warnings
    if s.channels != 1:
        logger.error("Signal must be single channel")
        raise ValueError("Signal must be single channel")
    try:
        label = label or DEFAULT_LABELS[metric]
    except KeyError as e:
        logger.error(f"Metric {metric} not recognized")
        raise ValueError(f"Metric {metric} not recognized.") from e
    if as_df and return_time_series:
        logger.warning(
            "Cannot return both a dataframe and time series. Returning dataframe only."
        )
        return_time_series = False

    # Start the calc
    res = {}
    try:
        if metric == "loudness_zwtv":
            N, N_spec, bark_axis, time_axis = loudness_zwtv(s, s.fs, **func_args)
            res = _stat_calcs(label, N, res, statistics)
            if return_time_series:
                res[f"{label}_ts"] = (time_axis, N)
        elif metric == "roughness_dw":
            R, R_spec, bark_axis, time_axis = roughness_dw(s, s.fs, **func_args)
            res = _stat_calcs(label, R, res, statistics)
            if return_time_series:
                res[f"{label}_ts"] = (time_axis, R)
        elif metric == "sharpness_din_from_loudness":
            # Sharpness-from-loudness first needs the loudness time series.
            field_type = func_args.get("field_type", "free")
            N, N_spec, bark_axis, time_axis = loudness_zwtv(
                s, s.fs, field_type=field_type
            )
            res = _stat_calcs("N", N, res, statistics)
            if return_time_series:
                res["N_ts"] = time_axis, N

            # Bug fix: the original popped "field_type" from func_args in place,
            # mutating the caller's dict. Build a filtered copy instead.
            sharpness_args = {
                k: v for k, v in func_args.items() if k != "field_type"
            }
            S = sharpness_din_from_loudness(N, N_spec, **sharpness_args)
            res = _stat_calcs(label, S, res, statistics)
            if return_time_series:
                res[f"{label}_ts"] = (time_axis, S)
        elif metric == "sharpness_din_perseg":
            S, time_axis = sharpness_din_perseg(s, s.fs, **func_args)
            res = _stat_calcs(label, S, res, statistics)
            if return_time_series:
                res[f"{label}_ts"] = (time_axis, S)
        elif metric == "sharpness_din_tv":
            S, time_axis = sharpness_din_tv(s, s.fs, **func_args)
            res = _stat_calcs(label, S, res, statistics)
            if return_time_series:
                res[f"{label}_ts"] = (time_axis, S)
        else:
            logger.error(f"Metric {metric} not recognized")
            raise ValueError(f"Metric {metric} not recognized.")
    except Exception as e:
        logger.error(f"Error calculating {metric}: {str(e)}")
        raise

    # Return the results in the requested format
    if not as_df:
        return res
    try:
        rec = s.recording
        return pd.DataFrame(res, index=[rec])
    except AttributeError:
        # Signal has no .recording attribute — fall back to index 0.
        return pd.DataFrame(res, index=[0])

mosqito_metric_2ch

mosqito_metric_2ch(b, metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, channel_names=('Left', 'Right'), as_df=False, return_time_series=False, parallel=True, func_args={})

Calculate metrics from MoSQITo for a two-channel signal with optional parallel processing.

PARAMETER DESCRIPTION
b

Binaural signal to calculate the sound quality indices for.

TYPE: Binaural

metric

TYPE: ("loudness_zwtv", "sharpness_din_from_loudness", "sharpness_din_perseg", "sharpness_din_tv", "roughness_dw")

statistics

List of level statistics to calculate (e.g. L_5, L_90, etc.).

TYPE: tuple or list DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric in the results dictionary. If None, will pull from default label for that metric given in DEFAULT_LABELS.

TYPE: str DEFAULT: None

channel_names

Custom names for the channels, by default ("Left", "Right").

TYPE: tuple of str DEFAULT: ('Left', 'Right')

as_df

Whether to return a pandas DataFrame, by default False. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: False

return_time_series

Whether to return the time series of the metric, by default False. Only works for metrics that return a time series array. Cannot be returned in a dataframe.

TYPE: bool DEFAULT: False

parallel

Whether to process channels in parallel, by default True.

TYPE: bool DEFAULT: True

func_args

Additional arguments to pass to the metric function, by default {}.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If the input signal is not 2-channel.

Source code in soundscapy/audio/metrics.py
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
def mosqito_metric_2ch(
    b,
    metric: str,
    statistics: Union[Tuple, List] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: str = None,
    channel_names: Tuple[str, str] = ("Left", "Right"),
    as_df: bool = False,
    return_time_series: bool = False,
    parallel: bool = True,
    func_args=None,
):
    """
    Calculate metrics from MoSQITo for a two-channel signal with optional parallel processing.

    Parameters
    ----------
    b : Binaural
        Binaural signal to calculate the sound quality indices for.
    metric : {"loudness_zwtv", "sharpness_din_from_loudness", "sharpness_din_perseg",
    "sharpness_din_tv", "roughness_dw"}
        Metric to calculate.
    statistics : tuple or list, optional
        List of level statistics to calculate (e.g. L_5, L_90, etc.).
    label : str, optional
        Label to use for the metric in the results dictionary.
        If None, will pull from default label for that metric given in DEFAULT_LABELS.
    channel_names : tuple of str, optional
        Custom names for the channels, by default ("Left", "Right").
    as_df : bool, optional
        Whether to return a pandas DataFrame, by default False.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    return_time_series : bool, optional
        Whether to return the time series of the metric, by default False.
        Only works for metrics that return a time series array.
        Cannot be returned in a dataframe.
    parallel : bool, optional
        Whether to process channels in parallel, by default True.
    func_args : dict, optional
        Additional arguments to pass to the metric function.
        Defaults to None, which is treated as an empty dict.

    Returns
    -------
    dict or pd.DataFrame
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.

    Raises
    ------
    ValueError
        If the input signal is not 2-channel.
    """
    logger.debug(f"Calculating MoSQITo metric for 2 channels: {metric}")

    # None default instead of `func_args={}`: a literal dict default is
    # evaluated once and shared across all calls (mutable-default pitfall).
    if func_args is None:
        func_args = {}

    if b.channels != 2:
        logger.error("Must be 2 channel signal. Use `mosqito_metric_1ch` instead.")
        raise ValueError("Must be 2 channel signal. Use `mosqito_metric_1ch` instead.")

    if metric == "sharpness_din_from_loudness":
        logger.debug(
            "Calculating MoSQITo metrics: `sharpness_din` from `loudness_zwtv`"
        )
    else:
        logger.debug(f"Calculating MoSQITo metric: {metric}")

    def _run_channel(channel):
        # Single definition of the per-channel call so the parallel and
        # serial code paths cannot drift apart.
        return mosqito_metric_1ch(
            channel,
            metric,
            statistics,
            label,
            False,  # as_df: per-channel dicts are combined below
            return_time_series,
            func_args,
        )

    try:
        if parallel:
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                future_l = executor.submit(_run_channel, b[0])
                future_r = executor.submit(_run_channel, b[1])
                res_l = future_l.result()
                res_r = future_r.result()
        else:
            res_l = _run_channel(b[0])
            res_r = _run_channel(b[1])

        res = {channel_names[0]: res_l, channel_names[1]: res_r}
    except Exception as e:
        logger.error(
            f"Error calculating MoSQITo metric {metric} for 2 channels: {str(e)}"
        )
        raise

    if not as_df:
        return res
    try:
        rec = b.recording
    except AttributeError:
        # Signals without a recording name fall back to index 0.
        rec = 0
    df = pd.DataFrame.from_dict(res, orient="index")
    df["Recording"] = rec
    df["Channel"] = df.index
    df.set_index(["Recording", "Channel"], inplace=True)
    return df

prep_multiindex_df

prep_multiindex_df(dictionary, label='Leq', incl_metric=True)

Prepare a MultiIndex dataframe from a dictionary of results.

PARAMETER DESCRIPTION
dictionary

Dict of results with recording name as key, channels {"Left", "Right"} as second key, and Leq metric as value.

TYPE: dict

label

Name of metric included, by default "Leq".

TYPE: str DEFAULT: 'Leq'

incl_metric

Whether to include the metric value in the resulting dataframe, by default True. If False, will only set up the DataFrame with the proper MultiIndex.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
DataFrame

Index includes "Recording" and "Channel" with a column for each index if incl_metric.

RAISES DESCRIPTION
ValueError

If the input dictionary is not in the expected format.

Source code in soundscapy/audio/metrics.py
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
def prep_multiindex_df(dictionary: dict, label: str = "Leq", incl_metric: bool = True):
    """
    Prepare a MultiIndex dataframe from a dictionary of results.

    Parameters
    ----------
    dictionary : dict
        Nested dict of results: recording name -> channel name
        (e.g. "Left"/"Right") -> metric value.
    label : str, optional
        Name of the metric column, by default "Leq".
    incl_metric : bool, optional
        If True (default), keep the metric values as a column; if False,
        return only an empty frame carrying the proper MultiIndex.

    Returns
    -------
    pd.DataFrame
        Frame indexed by ("Recording", "Channel"), with a `label` column
        when `incl_metric` is True.

    Raises
    ------
    ValueError
        If the input dictionary is not in the expected nested format.
    """
    logger.info("Preparing MultiIndex DataFrame")
    try:
        # Flatten {recording: {channel: value}} into {(recording, channel): value}.
        flat = {
            (recording, channel): value
            for recording, per_channel in dictionary.items()
            for channel, value in per_channel.items()
        }
        multi_idx = pd.MultiIndex.from_tuples(flat.keys())
        df = pd.DataFrame(flat.values(), index=multi_idx, columns=[label])
        df.index.names = ["Recording", "Channel"]
        if not incl_metric:
            df = df.drop(columns=[label])
        logger.debug("MultiIndex DataFrame prepared successfully")
        return df
    except Exception as e:
        logger.error(f"Error preparing MultiIndex DataFrame: {str(e)}")
        raise ValueError("Invalid input dictionary format") from e

process_all_metrics

process_all_metrics(b, analysis_settings, parallel=True)

Process all metrics specified in the analysis settings for a binaural signal.

This function runs through all enabled metrics in the provided analysis settings, computes them for the given binaural signal, and compiles the results into a single DataFrame.

PARAMETER DESCRIPTION
b

Binaural signal object to process.

TYPE: Binaural

analysis_settings

Configuration object specifying which metrics to run and their parameters.

TYPE: AnalysisSettings

parallel

If True, run applicable calculations in parallel. Defaults to True.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
DataFrame

A MultiIndex DataFrame containing results from all processed metrics. The index includes "Recording" and "Channel" levels.

RAISES DESCRIPTION
ValueError

If there's an error processing any of the metrics.

Notes

The parallel option primarily affects the MoSQITo metrics. Other metrics may not benefit from parallelization.

Examples:

>>> # xdoctest: +SKIP
>>> from soundscapy.audio import Binaural
>>> from soundscapy import AnalysisSettings
>>> signal = Binaural.from_wav("audio.wav", resample=480000)
>>> settings = AnalysisSettings.from_yaml("settings.yaml")
>>> results = process_all_metrics(signal,settings)
Source code in soundscapy/audio/metrics.py
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
def process_all_metrics(
    b, analysis_settings: AnalysisSettings, parallel: bool = True
) -> pd.DataFrame:
    """
    Process all metrics specified in the analysis settings for a binaural signal.

    Runs every enabled metric from the provided settings against `b` and
    joins the results column-wise into a single MultiIndex DataFrame.

    Parameters
    ----------
    b : Binaural
        Binaural signal object to process.
    analysis_settings : AnalysisSettings
        Configuration object specifying which metrics to run and their parameters.
    parallel : bool, optional
        If True, run applicable calculations in parallel. Defaults to True.

    Returns
    -------
    pd.DataFrame
        A MultiIndex DataFrame containing results from all processed metrics.
        The index includes "Recording" and "Channel" levels.

    Raises
    ------
    ValueError
        If there's an error processing any of the metrics.

    Notes
    -----
    The parallel option primarily affects the MoSQITo metrics. Other metrics
    may not benefit from parallelization.

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> from soundscapy.audio import Binaural
    >>> from soundscapy import AnalysisSettings
    >>> signal = Binaural.from_wav("audio.wav", resample=480000)
    >>> settings = AnalysisSettings.from_yaml("settings.yaml")
    >>> results = process_all_metrics(signal,settings)
    """
    logger.info(f"Processing all metrics for {b.recording}")
    logger.debug(f"Parallel processing: {parallel}")

    # Seed an empty frame with the ("Recording", "Channel") MultiIndex so
    # each metric's columns can be concatenated onto it.
    index = pd.MultiIndex.from_tuples(((b.recording, "Left"), (b.recording, "Right")))
    results = pd.DataFrame(index=index)
    results.index.names = ["Recording", "Channel"]

    try:
        enabled = analysis_settings.get_enabled_metrics()
        for library, metrics_settings in enabled.items():
            for metric, settings in metrics_settings.items():
                logger.debug(f"Processing {library} metric: {metric}")
                if library == "PythonAcoustics":
                    metric_df = b.pyacoustics_metric(metric, metric_settings=settings)
                elif library == "MoSQITo":
                    metric_df = b.mosqito_metric(
                        metric, parallel=parallel, metric_settings=settings
                    )
                elif library in ("scikit-maad", "scikit_maad"):
                    metric_df = b.maad_metric(metric, metric_settings=settings)
                else:
                    # Unrecognised libraries are skipped, matching the
                    # original fall-through behavior.
                    continue
                results = pd.concat((results, metric_df), axis=1)
        logger.info("All metrics processed successfully")
        return results
    except Exception as e:
        logger.error(f"Error processing metrics: {str(e)}")
        raise ValueError("Error processing metrics") from e

pyacoustics_metric_1ch

pyacoustics_metric_1ch(s, metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, as_df=False, return_time_series=False, func_args={})

Run a metric from the pyacoustics library on a single channel object.

PARAMETER DESCRIPTION
s

Single channel signal to calculate the metric for.

TYPE: Signal or Binaural (single channel slice)

metric

The metric to run.

TYPE: {"LZeq", "Leq", "LAeq", "LCeq", "SEL"}

statistics

List of level statistics to calculate (e.g. L_5, L_90, etc).

TYPE: List[Union[int, str]] DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric in the results dictionary. If None, will pull from default label for that metric given in DEFAULT_LABELS.

TYPE: str DEFAULT: None

as_df

Whether to return a pandas DataFrame, by default False. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: False

return_time_series

Whether to return the time series of the metric, by default False. Cannot return time series if as_df is True.

TYPE: bool DEFAULT: False

func_args

Additional keyword arguments to pass to the metric function, by default {}.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of the calculated statistics or a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If the signal is not single-channel or if an unrecognized metric is specified.

See Also

acoustics

Source code in soundscapy/audio/metrics.py
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
def pyacoustics_metric_1ch(
    s,
    metric: str,
    statistics: List[Union[int, str]] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: str = None,
    as_df: bool = False,
    return_time_series: bool = False,
    func_args=None,
):
    """
    Run a metric from the pyacoustics library on a single channel object.

    Parameters
    ----------
    s : Signal or Binaural (single channel slice)
        Single channel signal to calculate the metric for.
    metric : {"LZeq", "Leq", "LAeq", "LCeq", "SEL"}
        The metric to run.
    statistics : List[Union[int, str]], optional
        List of level statistics to calculate (e.g. L_5, L_90, etc).
    label : str, optional
        Label to use for the metric in the results dictionary.
        If None, will pull from default label for that metric given in DEFAULT_LABELS.
    as_df : bool, optional
        Whether to return a pandas DataFrame, by default False.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    return_time_series : bool, optional
        Whether to return the time series of the metric, by default False.
        Cannot return time series if as_df is True.
    func_args : dict, optional
        Additional keyword arguments to pass to the metric function.
        Defaults to None, which is treated as an empty dict.

    Returns
    -------
    dict or pd.DataFrame
        Dictionary of the calculated statistics or a pandas DataFrame.

    Raises
    ------
    ValueError
        If the signal is not single-channel or if an unrecognized metric is specified.

    See Also
    --------
    acoustics
    """
    logger.debug(f"Calculating pyacoustics metric: {metric}")

    # None default instead of `func_args={}`: a literal dict default is
    # evaluated once and shared across all calls (mutable-default pitfall).
    if func_args is None:
        func_args = {}

    if s.channels != 1:
        logger.error("Signal must be single channel")
        raise ValueError("Signal must be single channel")
    try:
        label = label or DEFAULT_LABELS[metric]
    except KeyError as e:
        logger.error(f"Metric {metric} not recognized")
        raise ValueError(f"Metric {metric} not recognized.") from e
    if as_df and return_time_series:
        logger.warning(
            "Cannot return both a dataframe and time series. Returning dataframe only."
        )

        return_time_series = False

    logger.debug(f"Calculating Python Acoustics: {metric} {statistics}")

    # Frequency weighting applied for each Leq-style metric.
    weightings = {"LZeq": "Z", "Leq": "Z", "LAeq": "A", "LCeq": "C"}

    res = {}
    try:
        if metric in weightings:
            weighting = weightings[metric]
            if "avg" in statistics or "mean" in statistics:
                stat = "avg" if "avg" in statistics else "mean"
                res[f"{label}"] = s.weigh(weighting).leq()
                # Copy before removing so a caller-supplied list is not mutated.
                statistics = list(statistics)
                statistics.remove(stat)
            if len(statistics) > 0:
                res = _stat_calcs(
                    label, s.weigh(weighting).levels(**func_args)[1], res, statistics
                )

            if return_time_series:
                res[f"{label}_ts"] = s.weigh(weighting).levels(**func_args)
        elif metric == "SEL":
            res[f"{label}"] = s.sound_exposure_level()
        else:
            logger.error(f"Metric {metric} not recognized")
            raise ValueError(f"Metric {metric} not recognized.")
    except Exception as e:
        logger.error(f"Error calculating {metric}: {str(e)}")
        raise

    if not as_df:
        return res
    try:
        rec = s.recording
        return pd.DataFrame(res, index=[rec])
    except AttributeError:
        # Signals without a recording name fall back to index 0.
        return pd.DataFrame(res, index=[0])

pyacoustics_metric_2ch

pyacoustics_metric_2ch(b, metric, statistics=(5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew'), label=None, channel_names=('Left', 'Right'), as_df=False, return_time_series=False, func_args={})

Run a metric from the python acoustics library on a Binaural object.

PARAMETER DESCRIPTION
b

Binaural signal to calculate the metric for.

TYPE: Binaural

metric

The metric to run.

TYPE: {"LZeq", "Leq", "LAeq", "LCeq", "SEL"}

statistics

List of level statistics to calculate (e.g. L_5, L_90, etc).

TYPE: tuple or list DEFAULT: (5, 10, 50, 90, 95, 'avg', 'max', 'min', 'kurt', 'skew')

label

Label to use for the metric in the results dictionary. If None, will pull from default label for that metric given in DEFAULT_LABELS.

TYPE: str DEFAULT: None

channel_names

Custom names for the channels, by default ("Left", "Right").

TYPE: tuple of str DEFAULT: ('Left', 'Right')

as_df

Whether to return a pandas DataFrame, by default False. If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.

TYPE: bool DEFAULT: False

return_time_series

Whether to return the time series of the metric, by default False. Cannot return time series if as_df is True.

TYPE: bool DEFAULT: False

func_args

Arguments to pass to the metric function, by default {}.

TYPE: dict DEFAULT: {}

RETURNS DESCRIPTION
dict or DataFrame

Dictionary of results if as_df is False, otherwise a pandas DataFrame.

RAISES DESCRIPTION
ValueError

If the input signal is not 2-channel.

See Also

pyacoustics_metric_1ch

Source code in soundscapy/audio/metrics.py
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
def pyacoustics_metric_2ch(
    b,
    metric: str,
    statistics: Union[Tuple, List] = (
        5,
        10,
        50,
        90,
        95,
        "avg",
        "max",
        "min",
        "kurt",
        "skew",
    ),
    label: str = None,
    channel_names: Tuple[str, str] = ("Left", "Right"),
    as_df: bool = False,
    return_time_series: bool = False,
    func_args=None,
):
    """
    Run a metric from the python acoustics library on a Binaural object.

    Parameters
    ----------
    b : Binaural
        Binaural signal to calculate the metric for.
    metric : {"LZeq", "Leq", "LAeq", "LCeq", "SEL"}
        The metric to run.
    statistics : tuple or list, optional
        List of level statistics to calculate (e.g. L_5, L_90, etc).
    label : str, optional
        Label to use for the metric in the results dictionary.
        If None, will pull from default label for that metric given in DEFAULT_LABELS.
    channel_names : tuple of str, optional
        Custom names for the channels, by default ("Left", "Right").
    as_df : bool, optional
        Whether to return a pandas DataFrame, by default False.
        If True, returns a MultiIndex Dataframe with ("Recording", "Channel") as the index.
    return_time_series : bool, optional
        Whether to return the time series of the metric, by default False.
        Cannot return time series if as_df is True.
    func_args : dict, optional
        Arguments to pass to the metric function.
        Defaults to None, which is treated as an empty dict.

    Returns
    -------
    dict or pd.DataFrame
        Dictionary of results if as_df is False, otherwise a pandas DataFrame.

    Raises
    ------
    ValueError
        If the input signal is not 2-channel.

    See Also
    --------
    pyacoustics_metric_1ch
    """
    logger.debug(f"Calculating pyacoustics metric for 2 channels: {metric}")

    # None default instead of `func_args={}`: a literal dict default is
    # evaluated once and shared across all calls (mutable-default pitfall).
    if func_args is None:
        func_args = {}

    if b.channels != 2:
        logger.error("Must be 2 channel signal. Use `pyacoustics_metric_1ch` instead.")
        # Fixed misplaced backticks in the raised message (previously
        # "`pyacoustics_metric_1ch instead`.").
        raise ValueError(
            "Must be 2 channel signal. Use `pyacoustics_metric_1ch` instead."
        )

    logger.debug(f"Calculating Python Acoustics metrics: {metric}")

    def _run_channel(channel):
        # Single definition of the per-channel call keeps both channels'
        # argument lists identical.
        return pyacoustics_metric_1ch(
            channel,
            metric,
            statistics,
            label,
            as_df=False,
            return_time_series=return_time_series,
            func_args=func_args,
        )

    try:
        res_l = _run_channel(b[0])
        res_r = _run_channel(b[1])
        res = {channel_names[0]: res_l, channel_names[1]: res_r}
    except Exception as e:
        logger.error(f"Error calculating {metric} for 2 channels: {str(e)}")
        raise

    if not as_df:
        return res
    try:
        rec = b.recording
    except AttributeError:
        # Signals without a recording name fall back to index 0.
        rec = 0
    df = pd.DataFrame.from_dict(res, orient="index")
    df["Recording"] = rec
    df["Channel"] = df.index
    df.set_index(["Recording", "Channel"], inplace=True)
    return df

Parallel Processing

soundscapy.audio.parallel_processing

This module provides functions for parallel processing of binaural audio files.

It includes functions to load and analyze binaural files, as well as to process multiple files in parallel using concurrent.futures.

Functions: load_analyse_binaural: Load and analyze a single binaural file. parallel_process: Process multiple binaural files in parallel.

Note: This module requires the tqdm library for progress bars and concurrent.futures for parallel processing. It uses loguru for logging.

load_analyse_binaural

load_analyse_binaural(wav_file, levels, analysis_settings, parallel_mosqito=True, resample=None)

Load and analyze a single binaural audio file.

PARAMETER DESCRIPTION
resample

Sample rate to pass to Binaural.from_wav when loading the file; if None, no resampling is requested.

TYPE: Optional[int] DEFAULT: None

wav_file

Path to the WAV file.

TYPE: Path

levels

Dictionary with calibration levels for each channel.

TYPE: Dict

analysis_settings

Analysis settings object.

TYPE: AnalysisSettings

parallel_mosqito

Whether to process MoSQITo metrics in parallel. Defaults to True.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
DataFrame

DataFrame with analysis results.

Source code in soundscapy/audio/parallel_processing.py
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
def load_analyse_binaural(
    wav_file: Path,
    levels: Dict | List[float] | None,
    analysis_settings: AnalysisSettings,
    parallel_mosqito: bool = True,
    resample: Optional[int] = None,
) -> pd.DataFrame:
    """
    Load and analyze a single binaural audio file.

    Loads the WAV file, optionally calibrates it to the given levels, and
    runs all enabled metrics from the analysis settings.

    Parameters
    ----------
    wav_file : Path
        Path to the WAV file.
    levels : Dict or List[float] or None
        Calibration levels. Either a dict keyed by recording name holding
        "Left"/"Right" channel levels, a list/tuple of levels applied
        directly, or None to skip calibration.
    analysis_settings : AnalysisSettings
        Analysis settings object.
    parallel_mosqito : bool, optional
        Whether to process MoSQITo metrics in parallel. Defaults to True.
    resample : int, optional
        Sample rate forwarded to ``Binaural.from_wav``; presumably no
        resampling is performed when None — TODO confirm against
        ``Binaural.from_wav``.

    Returns
    -------
    pd.DataFrame
        DataFrame with analysis results.

    Raises
    ------
    Exception
        Re-raises any error encountered while loading, calibrating, or
        processing the file (after logging it).
    """
    logger.info(f"Processing {wav_file}")
    try:
        b = Binaural.from_wav(wav_file, resample=resample)
        if levels is not None:
            if isinstance(levels, dict) and b.recording in levels:
                # Per-recording calibration: look up this recording's
                # left/right levels by its name.
                decibel = (levels[b.recording]["Left"], levels[b.recording]["Right"])
                b.calibrate_to(decibel, inplace=True)
            elif isinstance(levels, list | tuple):
                # Direct calibration levels supplied for this file.
                logger.debug(f"Calibrating {wav_file} to {levels} dB")
                b.calibrate_to(levels, inplace=True)
            else:
                # Dict given but this recording is absent (or unexpected
                # type): proceed uncalibrated with a warning.
                logger.warning(f"No calibration levels found for {wav_file}")
        else:
            logger.warning(f"No calibration levels found for {wav_file}")
        return process_all_metrics(b, analysis_settings, parallel=parallel_mosqito)
    except Exception as e:
        logger.error(f"Error processing {wav_file}: {str(e)}")
        raise

parallel_process

parallel_process(wav_files, results_df, levels, analysis_settings, max_workers=None, parallel_mosqito=True, resample=None)

Process multiple binaural files in parallel.

PARAMETER DESCRIPTION
resample

Sample rate forwarded to load_analyse_binaural for each file; if None, no resampling is requested.

TYPE: Optional[int] DEFAULT: None

wav_files

List of WAV files to process.

TYPE: List[Path]

results_df

Initial results DataFrame to update.

TYPE: DataFrame

levels

Dictionary with calibration levels for each file.

TYPE: Dict

analysis_settings

Analysis settings object.

TYPE: AnalysisSettings

max_workers

Maximum number of worker processes. If None, it will default to the number of processors on the machine.

TYPE: int DEFAULT: None

parallel_mosqito

Whether to process MoSQITo metrics in parallel within each file. Defaults to True.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
DataFrame

Updated results DataFrame with analysis results for all files.

Source code in soundscapy/audio/parallel_processing.py
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
def parallel_process(
    wav_files: List[Path],
    results_df: pd.DataFrame,
    levels: Dict,
    analysis_settings: AnalysisSettings,
    max_workers: Optional[int] = None,
    parallel_mosqito: bool = True,
    resample: Optional[int] = None,
) -> pd.DataFrame:
    """
    Process multiple binaural files in parallel.

    Parameters
    ----------
    wav_files : List[Path]
        List of WAV files to process.
    results_df : pd.DataFrame
        Initial results DataFrame to update.
    levels : Dict
        Dictionary with calibration levels for each file.
    analysis_settings : AnalysisSettings
        Analysis settings object.
    max_workers : int, optional
        Maximum number of worker processes. If None, it will default to the
        number of processors on the machine.
    parallel_mosqito : bool, optional
        Whether to process MoSQITo metrics in parallel within each file.
        Defaults to True.
    resample : int, optional
        Sample rate forwarded to ``load_analyse_binaural`` for each file.
        Defaults to None.

    Returns
    -------
    pd.DataFrame
        Updated results DataFrame with analysis results for all files.

    Notes
    -----
    A file that fails to process is logged and skipped; the remaining files
    are still processed rather than aborting the whole batch.
    """
    logger.info(f"Starting parallel processing of {len(wav_files)} files")

    # Route log output through tqdm.write so messages don't corrupt the
    # progress bar; the handler is removed again before returning.
    tqdm_handler_id = logger.add(tqdm_write_sink, format="{message}")

    with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit every file up front, then collect results as they finish.
        futures = []
        for wav_file in wav_files:
            future = executor.submit(
                load_analyse_binaural,
                wav_file,
                levels,
                analysis_settings,
                parallel_mosqito,
                resample,
            )
            futures.append(future)

        with tqdm(total=len(futures), desc="Processing files") as pbar:
            for future in concurrent.futures.as_completed(futures):
                try:
                    result = future.result()
                    results_df = add_results(results_df, result)
                except Exception as e:
                    # Log and continue: one bad file must not abort the batch.
                    logger.error(f"Error processing file: {str(e)}")
                finally:
                    pbar.update(1)

    # Remove the tqdm-compatible handler
    logger.remove(tqdm_handler_id)

    logger.info("Parallel processing completed")
    return results_df

tqdm_write_sink

tqdm_write_sink(message)

A custom sink for loguru that writes messages using tqdm.write().

This ensures that log messages don't interfere with tqdm progress bars.

Source code in soundscapy/audio/parallel_processing.py
38
39
40
41
42
43
44
def tqdm_write_sink(message):
    """
    A custom sink for loguru that writes messages using tqdm.write().

    This ensures that log messages don't interfere with tqdm progress bars.

    Parameters
    ----------
    message
        Pre-formatted log message supplied by loguru. Written with
        ``end=""`` — presumably because the formatted message already
        carries its own trailing newline; TODO confirm against the
        configured log format.
    """
    tqdm.write(message, end="")