Add Micro auto

This commit is contained in:
2026-04-05 10:58:35 +02:00
parent 88fed85a13
commit d9d0de51dd

View File

@@ -103,13 +103,14 @@ export default function App() {
const audioContextRef = useRef(null) const audioContextRef = useRef(null)
const analyserRef = useRef(null) const analyserRef = useRef(null)
const sourceNodeRef = useRef(null) const sourceNodeRef = useRef(null)
const processorNodeRef = useRef(null)
const monitorGainRef = useRef(null) const monitorGainRef = useRef(null)
const animationFrameRef = useRef(null)
const silenceStartedAtRef = useRef(null) const silenceStartedAtRef = useRef(null)
const hasSpeechInSegmentRef = useRef(false) const hasSpeechInSegmentRef = useRef(false)
const isRecordingRef = useRef(false) const isRecordingRef = useRef(false)
const isAutoListeningRef = useRef(false) const isAutoListeningRef = useRef(false)
const isTranscribingRef = useRef(false) const isTranscribingRef = useRef(false)
const levelFrameCountRef = useRef(0)
const availableVoices = useMemo(() => { const availableVoices = useMemo(() => {
return [...voices].sort((a, b) => scoreVoice(b) - scoreVoice(a)) return [...voices].sort((a, b) => scoreVoice(b) - scoreVoice(a))
@@ -284,14 +285,15 @@ export default function App() {
function stopMediaStream() { function stopMediaStream() {
pushAudioDebug('Arrêt et nettoyage du flux micro') pushAudioDebug('Arrêt et nettoyage du flux micro')
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current)
animationFrameRef.current = null
}
if (sourceNodeRef.current) { if (sourceNodeRef.current) {
sourceNodeRef.current.disconnect() sourceNodeRef.current.disconnect()
sourceNodeRef.current = null sourceNodeRef.current = null
} }
if (processorNodeRef.current) {
processorNodeRef.current.disconnect()
processorNodeRef.current.onaudioprocess = null
processorNodeRef.current = null
}
if (monitorGainRef.current) { if (monitorGainRef.current) {
monitorGainRef.current.disconnect() monitorGainRef.current.disconnect()
monitorGainRef.current = null monitorGainRef.current = null
@@ -303,6 +305,7 @@ export default function App() {
analyserRef.current = null analyserRef.current = null
silenceStartedAtRef.current = null silenceStartedAtRef.current = null
hasSpeechInSegmentRef.current = false hasSpeechInSegmentRef.current = false
levelFrameCountRef.current = 0
setMicLevel(0) setMicLevel(0)
if (mediaStreamRef.current) { if (mediaStreamRef.current) {
mediaStreamRef.current.getTracks().forEach((track) => track.stop()) mediaStreamRef.current.getTracks().forEach((track) => track.stop())
@@ -368,17 +371,13 @@ export default function App() {
} }
} }
function calculateVolume(analyser) { function calculateVolume(floatData) {
const data = new Uint8Array(analyser.fftSize)
analyser.getByteTimeDomainData(data)
let sumSquares = 0 let sumSquares = 0
for (const value of data) { for (const value of floatData) {
const normalized = (value - 128) / 128 sumSquares += value * value
sumSquares += normalized * normalized
} }
return Math.sqrt(sumSquares / data.length) return Math.sqrt(sumSquares / floatData.length)
} }
async function startSegmentRecording() { async function startSegmentRecording() {
@@ -441,12 +440,18 @@ export default function App() {
} }
} }
function monitorMicrophone() { function handleMicrophoneLevel(volume) {
if (!analyserRef.current || !isAutoListeningRef.current) return if (!isAutoListeningRef.current) return
const volume = calculateVolume(analyserRef.current)
const now = Date.now() const now = Date.now()
setMicLevel(volume) setMicLevel(volume)
levelFrameCountRef.current += 1
if (levelFrameCountRef.current === 1) {
pushAudioDebug(`Premier niveau audio reçu=${volume.toFixed(4)}`)
} else if (levelFrameCountRef.current % 50 === 0) {
pushAudioDebug(`Niveau audio=${volume.toFixed(4)}`)
}
if (volume >= SPEECH_START_THRESHOLD) { if (volume >= SPEECH_START_THRESHOLD) {
if (!hasSpeechInSegmentRef.current) { if (!hasSpeechInSegmentRef.current) {
@@ -465,8 +470,6 @@ export default function App() {
} else if (volume > SILENCE_THRESHOLD) { } else if (volume > SILENCE_THRESHOLD) {
silenceStartedAtRef.current = null silenceStartedAtRef.current = null
} }
animationFrameRef.current = requestAnimationFrame(monitorMicrophone)
} }
async function activateAutoListening() { async function activateAutoListening() {
@@ -495,22 +498,31 @@ export default function App() {
analyser.fftSize = 2048 analyser.fftSize = 2048
analyser.smoothingTimeConstant = 0.85 analyser.smoothingTimeConstant = 0.85
const sourceNode = audioContext.createMediaStreamSource(stream) const sourceNode = audioContext.createMediaStreamSource(stream)
const processorNode = audioContext.createScriptProcessor(2048, 1, 1)
const monitorGain = audioContext.createGain() const monitorGain = audioContext.createGain()
monitorGain.gain.value = 0 monitorGain.gain.value = 0
processorNode.onaudioprocess = (event) => {
const channelData = event.inputBuffer.getChannelData(0)
const volume = calculateVolume(channelData)
handleMicrophoneLevel(volume)
}
sourceNode.connect(analyser) sourceNode.connect(analyser)
sourceNode.connect(processorNode)
processorNode.connect(monitorGain)
analyser.connect(monitorGain) analyser.connect(monitorGain)
monitorGain.connect(audioContext.destination) monitorGain.connect(audioContext.destination)
audioContextRef.current = audioContext audioContextRef.current = audioContext
analyserRef.current = analyser analyserRef.current = analyser
sourceNodeRef.current = sourceNode sourceNodeRef.current = sourceNode
processorNodeRef.current = processorNode
monitorGainRef.current = monitorGain monitorGainRef.current = monitorGain
mediaStreamRef.current = stream mediaStreamRef.current = stream
setIsAutoListening(true) setIsAutoListening(true)
setVoiceStatus('Micro actif. Parle quand tu veux, j'enverrai après 2,5 s de silence.') setVoiceStatus('Micro actif. Parle quand tu veux, j'enverrai après 2,5 s de silence.')
pushAudioDebug(`Piste micro active=${stream.getAudioTracks()[0]?.readyState || 'inconnue'}`) pushAudioDebug(`Piste micro active=${stream.getAudioTracks()[0]?.readyState || 'inconnue'}`)
await startSegmentRecording() await startSegmentRecording()
monitorMicrophone()
} catch { } catch {
pushAudioDebug('Accès micro refusé ou indisponible') pushAudioDebug('Accès micro refusé ou indisponible')
setVoiceStatus('Accès au micro refusé ou indisponible.') setVoiceStatus('Accès au micro refusé ou indisponible.')