audio.py

import os
from functools import lru_cache
from subprocess import CalledProcessError, run
from typing import Optional, Union

import numpy as np
import torch
import torch.nn.functional as F

from .utils import exact_div

# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000 frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20ms per audio token
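
# Worked values for the derived constants above (arithmetic spelled out for
# reference, using the defaults):
#   N_SAMPLES         = 30 * 16000   = 480000
#   N_FRAMES          = 480000 / 160 = 3000
#   FRAMES_PER_SECOND = 16000 / 160  = 100   -> 10 ms per frame
#   TOKENS_PER_SECOND = 16000 / 320  = 50    -> 20 ms per token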


def load_audio(file: str, sr: int = SAMPLE_RATE):
    """
    Open an audio file and read as mono waveform, resampling as necessary

    Parameters
    ----------
    file: str
        The audio file to open

    sr: int
        The sample rate to resample the audio if necessary

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """

    # This launches a subprocess to decode audio while down-mixing
    # and resampling as necessary. Requires the ffmpeg CLI in PATH.
    # fmt: off
    cmd = [
        "ffmpeg",
        "-nostdin",
        "-threads", "0",
        "-i", file,
        "-f", "s16le",
        "-ac", "1",
        "-acodec", "pcm_s16le",
        "-ar", str(sr),
        "-"
    ]
    # fmt: on
    try:
        out = run(cmd, capture_output=True, check=True).stdout
    except CalledProcessError as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e

    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
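
# Example usage (a minimal sketch; "speech.wav" is a hypothetical path):
#   waveform = load_audio("speech.wav")
#   waveform.dtype   # float32, scaled by 1/32768 into roughly [-1.0, 1.0)
#   waveform.shape   # (n_samples,) mono at 16 kHz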


def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
    """
    if torch.is_tensor(array):
        if array.shape[axis] > length:
            array = array.index_select(
                dim=axis, index=torch.arange(length, device=array.device)
            )

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            # F.pad expects a flat list of (left, right) pairs, starting
            # from the last dimension, hence the reversal and flattening
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
    else:
        if array.shape[axis] > length:
            array = array.take(indices=range(length), axis=axis)

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = np.pad(array, pad_widths)

    return array
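
# Example (a sketch): a 10-second clip is zero-padded up to the 30-second
# window the encoder expects.
#   clip = np.zeros(10 * SAMPLE_RATE, dtype=np.float32)
#   padded = pad_or_trim(clip)
#   padded.shape   # (480000,) == (N_SAMPLES,)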


@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int) -> torch.Tensor:
    """
    Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    Allows decoupling librosa dependency; saved using:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
            mel_128=librosa.filters.mel(sr=16000, n_fft=400, n_mels=128),
        )
    """
    assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}"

    filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
    with np.load(filters_path, allow_pickle=False) as f:
        return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
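
# The cached filterbank has shape (n_mels, N_FFT // 2 + 1), i.e. (80, 201)
# for the default case (a sanity check; assumes the bundled mel_filters.npz
# matches the librosa call in the docstring above):
#   fb = mel_filters("cpu", 80)
#   fb.shape   # torch.Size([80, 201])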


def log_mel_spectrogram(
    audio: Union[str, np.ndarray, torch.Tensor],
    n_mels: int = 80,
    padding: int = 0,
    device: Optional[Union[str, torch.device]] = None,
):
    """
    Compute the log-Mel spectrogram of the given audio.

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the
        audio waveform at 16 kHz

    n_mels: int
        The number of Mel-frequency filters, only 80 and 128 are supported

    padding: int
        Number of zero samples to pad to the right

    device: Optional[Union[str, torch.device]]
        If given, the audio tensor is moved to this device before STFT

    Returns
    -------
    torch.Tensor, shape = (n_mels, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)

    if device is not None:
        audio = audio.to(device)
    if padding > 0:
        audio = F.pad(audio, (0, padding))
    window = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
    magnitudes = stft[..., :-1].abs() ** 2  # power spectrum; drop the extra final frame

    filters = mel_filters(audio.device, n_mels)
    mel_spec = filters @ magnitudes

    # log compression, clamped to 80 dB of dynamic range, then scaled to ~[-1, 1]
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
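
# End-to-end usage (a minimal sketch; "speech.wav" is a hypothetical path,
# and the shape assumes the default 80 mel bins and a 30-second input):
#   audio = pad_or_trim(load_audio("speech.wav"))
#   mel = log_mel_spectrogram(audio, n_mels=80)
#   mel.shape   # torch.Size([80, 3000]) == (n_mels, N_FRAMES)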