fixed most of the audio stuttering
parent 077d1d3d9d
commit c225a3e27c
3 changed files with 72 additions and 43 deletions
README.md (36 changes)

@@ -1,25 +1,29 @@
## Installation
# M2 Projet Thématique - Diffusion Radio Proche-en-Proche

A project aiming to create a network of machines able to broadcast an audio
source and play it in a synchronized way.

Network communications must be encrypted, and it must not be possible to
insert an unknown machine to disrupt the network.

## Usage

Debian
```bash
# Prerequisites
sudo apt install chrony
# dependencies
sudo apt install ffmpeg

# Download
sudo apt install -y git
# download the project
git clone https://git.faraphel.fr/study-faraphel/M2-PT-DRP
cd M2-PT-DRP
cd ./M2-PT-DRP/

# Dependencies
sudo apt install -y libmpg123-dev libssl-dev portaudio19-dev
# create a virtual environment
python3 -m venv ./.venv/
source ./.venv/bin/activate

# Compile
sudo apt install -y build-essential cmake ninja-build pkg-config
cmake -S . -B build -G Ninja
cmake --build build
cd build
# install python packages
pip3 install -r ./requirements.txt

# Run
sudo ./M2-PT-DRP --host ff02::1 --ipv6
# run the application
python3 -m source
```
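Since playback is synchronized across machines by the system clock, it may be worth confirming that chrony actually has the clock in sync before starting the application; a quick check with chrony's standard client:

```bash
# report the synchronization status and current clock offset
chronyc tracking
```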

@@ -3,6 +3,7 @@ from datetime import datetime, timedelta

import pause
import pydub
from pydub.utils import make_chunks

from source.behaviors.roles import base
from source.managers import Manager

@@ -25,27 +26,24 @@ class MasterRole(base.BaseRole):
        self.manager.communication.secret_key = os.urandom(32)

        # prepare the audio file that will be streamed
        # TODO(Faraphel): use another audio source
        self.audio = pydub.AudioSegment.from_file("../assets/Caravan Palace - Wonderland.mp3")
        self.play_time = datetime.now()

        # calculate the number of bytes per millisecond in the audio
        bytes_per_ms = self.audio.frame_rate * self.audio.sample_width * self.audio.channels / 1000
        # calculate the chunk duration required to reach that target size
        self.chunk_duration = timedelta(milliseconds=self.TARGET_SIZE / bytes_per_ms)

        # split the audio into chunks
        self.chunks = make_chunks(self.audio, self.chunk_duration.total_seconds() * 1000)

    def handle(self):
        # TODO(Faraphel): check if another server is emitting sound in the network. Return to undefined if yes

        # calculate the number of bytes per millisecond in the audio
        bytes_per_ms = self.audio.frame_rate * self.audio.sample_width * self.audio.channels / 1000
        # calculate the chunk duration required to reach that target size
        chunk_duration = timedelta(milliseconds=self.TARGET_SIZE / bytes_per_ms)

        # calculate the audio time
        chunk_start_time = datetime.now() - self.play_time
        chunk_end_time = chunk_start_time + chunk_duration

        # get the music for that period
        chunk = self.audio[
            chunk_start_time.total_seconds() * 1000 :
            chunk_end_time.total_seconds() * 1000
        ]
        # get the next chunk
        chunk = self.chunks.pop(0)

        # broadcast it in the network
        audio_packet = AudioPacket(

@@ -59,4 +57,4 @@ class MasterRole(base.BaseRole):

        # wait for the audio to play
        # TODO(Faraphel): should adapt to the compute time above
        pause.until(self.play_time + chunk_end_time)
        pause.until(datetime.now() + self.chunk_duration)
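The reworked master pre-computes a fixed chunk duration from a target packet size instead of re-slicing the audio against wall-clock time, then paces itself one chunk ahead with `pause.until`. A worked example of the sizing math, with hypothetical CD-quality parameters (the real `TARGET_SIZE` constant lives on `MasterRole` and may differ):

```python
from datetime import timedelta

# hypothetical stream parameters: 44.1 kHz, 16-bit samples, stereo
frame_rate, sample_width, channels = 44100, 2, 2
TARGET_SIZE = 4096  # assumed target payload size in bytes

# 44100 frames/s * 2 bytes * 2 channels = 176400 bytes/s = 176.4 bytes/ms
bytes_per_ms = frame_rate * sample_width * channels / 1000

# duration of audio that fits in one packet: 4096 / 176.4 ms
chunk_duration = timedelta(milliseconds=TARGET_SIZE / bytes_per_ms)
print(chunk_duration.total_seconds() * 1000)  # ≈ 23.22
```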

@@ -1,4 +1,5 @@
import threading
import typing
from datetime import datetime

import numpy

@@ -12,6 +13,8 @@ from source.utils.audio.audio import sample_width_to_type

class AudioManager:
    def __init__(self, manager: "managers.Manager"):
        self.stream: typing.Optional[sounddevice.OutputStream] = None

        # buffer containing the audio chunks to play, sorted by their scheduled play time
        self.buffer = sortedcontainers.SortedList(key=lambda audio: audio.time)
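Keeping the buffer ordered by each packet's scheduled time means the handler can always pop the packet due soonest, regardless of arrival order. A minimal sketch of that behavior, using a hypothetical stand-in for `packets.AudioPacket`:

```python
from dataclasses import dataclass
from datetime import datetime, timedelta

import sortedcontainers

@dataclass
class FakePacket:
    # hypothetical stand-in for packets.AudioPacket
    time: datetime

buffer = sortedcontainers.SortedList(key=lambda audio: audio.time)

now = datetime.now()
late = FakePacket(now + timedelta(milliseconds=50))
soon = FakePacket(now + timedelta(milliseconds=25))

# insertion order does not matter: the list stays sorted by time
buffer.add(late)
buffer.add(soon)

# pop(0) always yields the packet scheduled soonest
assert buffer.pop(0) is soon
```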

@@ -31,13 +34,46 @@ class AudioManager:

        # trigger the new audio event
        self.new_audio_event.set()

    def play_audio(self, audio: packets.AudioPacket) -> None:
        # create a numpy array for our sample
        sample = numpy.frombuffer(audio.data, dtype=sample_width_to_type(audio.sample_width))
        # reshape it to have a sub-array for each channel
        sample = sample.reshape((-1, audio.channels))
        # normalize the sample to be between -1 and 1
        sample = sample / (2 ** (audio.sample_width * 8 - 1))
        # use float32 for the audio library
        sample = sample.astype(numpy.float32)

        # wait until the audio's scheduled time
        pause.until(audio.time)

        # update the stream if the audio uses different settings
        if (
            self.stream is None or
            self.stream.samplerate != audio.sample_rate or
            self.stream.channels != audio.channels
        ):
            self.stream = sounddevice.OutputStream(
                samplerate=audio.sample_rate,
                channels=audio.channels,
            )

        # play
        self.stream.start()

        # write the audio to the stream
        self.stream.write(sample)

    def handle(self) -> None:
        """
        Play the audio chunks in the buffer at their scheduled time
        """

        # wait for a new audio packet
        self.new_audio_event.wait()
        # TODO(Faraphel): use self.lock ? seems to softlock the application
        if len(self.buffer) == 0:
            self.new_audio_event.clear()
            self.new_audio_event.wait()

        # get the next audio packet to play
        audio: packets.AudioPacket = self.buffer.pop(0)
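The new `play_audio` path is what removes most of the stutter: instead of calling `sounddevice.play()` once per packet, which sets up and tears down playback for every chunk and leaves an audible gap between them, a single `sounddevice.OutputStream` is kept open and each chunk is pushed through a blocking `write()`. A minimal sketch of the pattern, assuming 16-bit stereo input at 44.1 kHz:

```python
import numpy
import sounddevice

# open the output device once; reusing it avoids the start/stop gap
# that a fresh sounddevice.play() per chunk would introduce
stream = sounddevice.OutputStream(samplerate=44100, channels=2)
stream.start()

def play_chunk(data: bytes) -> None:
    # interpret the raw bytes as interleaved signed 16-bit samples
    sample = numpy.frombuffer(data, dtype=numpy.int16).reshape((-1, 2))
    # scale to float32 in [-1, 1), the stream's default sample format
    sample = (sample / 2 ** 15).astype(numpy.float32)
    # blocking write: returns once the chunk is handed to the device
    stream.write(sample)
```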

@@ -46,18 +82,9 @@ class AudioManager:

        if audio.time < datetime.now():
            return

        # create a numpy array for our sample
        sample = numpy.frombuffer(audio.data, dtype=sample_width_to_type(numpy.int16))
        # reshape it to have a sub-array for each channel
        sample = sample.reshape((-1, audio.channels))
        # normalize the sample to be between -1 and 1
        sample = sample / (2 ** (audio.sample_width * 8 - 1))
        # play the audio packet
        self.play_audio(audio)

        # wait until the audio's scheduled time
        pause.until(audio.time)

        # play the audio
        sounddevice.play(sample, audio.sample_rate)

    def loop(self) -> None:
        """