diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index 9856dc6..fab5414 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -2,7 +2,8 @@ import React, { useEffect, useMemo, useRef, useState } from 'react'
 
 const API_BASE = '/api'
 const AUTO_STOP_SILENCE_MS = 2500
-const SPEECH_START_THRESHOLD = 0.05
+const SPEECH_START_THRESHOLD = 0.02
+const SILENCE_THRESHOLD = 0.012
 
 async function parseApiResponse(res) {
   const contentType = res.headers.get('content-type') || ''
@@ -90,6 +91,7 @@ export default function App() {
   const [isTranscribing, setIsTranscribing] = useState(false)
   const [voiceStatus, setVoiceStatus] = useState('')
   const [errorMessage, setErrorMessage] = useState('')
+  const [micLevel, setMicLevel] = useState(0)
   const [voices, setVoices] = useState([])
   const [selectedVoiceURI, setSelectedVoiceURI] = useState('')
   const mediaRecorderRef = useRef(null)
@@ -101,6 +103,7 @@
   const sourceNodeRef = useRef(null)
   const animationFrameRef = useRef(null)
   const silenceStartedAtRef = useRef(null)
+  const hasSpeechInSegmentRef = useRef(false)
   const isRecordingRef = useRef(false)
   const isAutoListeningRef = useRef(false)
   const isTranscribingRef = useRef(false)
@@ -285,6 +288,8 @@
     }
     analyserRef.current = null
     silenceStartedAtRef.current = null
+    hasSpeechInSegmentRef.current = false
+    setMicLevel(0)
     if (mediaStreamRef.current) {
       mediaStreamRef.current.getTracks().forEach((track) => track.stop())
       mediaStreamRef.current = null
@@ -370,6 +375,7 @@
     recordedChunksRef.current = []
     recordingMimeTypeRef.current = mimeType || recorder.mimeType || 'audio/webm'
     silenceStartedAtRef.current = null
+    hasSpeechInSegmentRef.current = false
 
     recorder.ondataavailable = (event) => {
       if (event.data && event.data.size > 0) {
@@ -384,11 +390,13 @@
       mediaRecorderRef.current = null
       setIsRecording(false)
 
-      if (audioBlob.size > 0) {
+      if (audioBlob.size > 0 && hasSpeechInSegmentRef.current) {
         await transcribeRecording(audioBlob, finalMimeType, { autoSend: true })
       } else if (isAutoListeningRef.current) {
         setVoiceStatus('Micro actif. Parle quand tu veux.')
       }
+
+      hasSpeechInSegmentRef.current = false
     }
 
     recorder.onerror = () => {
@@ -415,18 +423,19 @@
     const volume = calculateVolume(analyserRef.current)
     const now = Date.now()
 
+    setMicLevel(volume)
     if (volume >= SPEECH_START_THRESHOLD) {
+      hasSpeechInSegmentRef.current = true
       silenceStartedAtRef.current = null
-      if (!isRecordingRef.current && !isTranscribingRef.current) {
-        startSegmentRecording()
-      }
-    } else if (isRecordingRef.current) {
+    } else if (isRecordingRef.current && hasSpeechInSegmentRef.current && volume <= SILENCE_THRESHOLD) {
       if (!silenceStartedAtRef.current) {
         silenceStartedAtRef.current = now
       } else if (now - silenceStartedAtRef.current >= AUTO_STOP_SILENCE_MS) {
         stopSegmentRecording()
       }
+    } else if (volume > SILENCE_THRESHOLD) {
+      silenceStartedAtRef.current = null
     }
 
     animationFrameRef.current = requestAnimationFrame(monitorMicrophone)
   }
@@ -459,6 +468,7 @@
       mediaStreamRef.current = stream
       setIsAutoListening(true)
       setVoiceStatus('Micro actif. Parle quand tu veux, j’enverrai après 2,5 s de silence.')
+      await startSegmentRecording()
       monitorMicrophone()
     } catch {
       setVoiceStatus('Accès au micro refusé ou indisponible.')
@@ -603,6 +613,11 @@
+      {isAutoListening && (
+        <div>
+          Niveau micro: {Math.round(Math.min(micLevel * 1200, 100))}%
+        </div>
+      )}
       {voiceStatus && <div>
         {voiceStatus}
       </div>}
       {errorMessage && <div>
         {errorMessage}
       </div>}