Add Micro auto
This commit is contained in:
@@ -103,13 +103,14 @@ export default function App() {
|
||||
const audioContextRef = useRef(null)
|
||||
const analyserRef = useRef(null)
|
||||
const sourceNodeRef = useRef(null)
|
||||
const processorNodeRef = useRef(null)
|
||||
const monitorGainRef = useRef(null)
|
||||
const animationFrameRef = useRef(null)
|
||||
const silenceStartedAtRef = useRef(null)
|
||||
const hasSpeechInSegmentRef = useRef(false)
|
||||
const isRecordingRef = useRef(false)
|
||||
const isAutoListeningRef = useRef(false)
|
||||
const isTranscribingRef = useRef(false)
|
||||
const levelFrameCountRef = useRef(0)
|
||||
|
||||
const availableVoices = useMemo(() => {
|
||||
return [...voices].sort((a, b) => scoreVoice(b) - scoreVoice(a))
|
||||
@@ -284,14 +285,15 @@ export default function App() {
|
||||
|
||||
function stopMediaStream() {
|
||||
pushAudioDebug('Arrêt et nettoyage du flux micro')
|
||||
if (animationFrameRef.current) {
|
||||
cancelAnimationFrame(animationFrameRef.current)
|
||||
animationFrameRef.current = null
|
||||
}
|
||||
if (sourceNodeRef.current) {
|
||||
sourceNodeRef.current.disconnect()
|
||||
sourceNodeRef.current = null
|
||||
}
|
||||
if (processorNodeRef.current) {
|
||||
processorNodeRef.current.disconnect()
|
||||
processorNodeRef.current.onaudioprocess = null
|
||||
processorNodeRef.current = null
|
||||
}
|
||||
if (monitorGainRef.current) {
|
||||
monitorGainRef.current.disconnect()
|
||||
monitorGainRef.current = null
|
||||
@@ -303,6 +305,7 @@ export default function App() {
|
||||
analyserRef.current = null
|
||||
silenceStartedAtRef.current = null
|
||||
hasSpeechInSegmentRef.current = false
|
||||
levelFrameCountRef.current = 0
|
||||
setMicLevel(0)
|
||||
if (mediaStreamRef.current) {
|
||||
mediaStreamRef.current.getTracks().forEach((track) => track.stop())
|
||||
@@ -368,17 +371,13 @@ export default function App() {
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the root-mean-square (RMS) amplitude of a buffer of float audio
// samples (e.g. the Float32Array from AudioBuffer.getChannelData). Returns a
// value in [0, 1] for normalized input; NaN when the buffer is empty.
function calculateVolume(floatData) {
  const sumSquares = floatData.reduce((acc, sample) => acc + sample * sample, 0)
  return Math.sqrt(sumSquares / floatData.length)
}
|
||||
|
||||
async function startSegmentRecording() {
|
||||
@@ -441,12 +440,18 @@ export default function App() {
|
||||
}
|
||||
}
|
||||
|
||||
function monitorMicrophone() {
|
||||
if (!analyserRef.current || !isAutoListeningRef.current) return
|
||||
function handleMicrophoneLevel(volume) {
|
||||
if (!isAutoListeningRef.current) return
|
||||
|
||||
const volume = calculateVolume(analyserRef.current)
|
||||
const now = Date.now()
|
||||
setMicLevel(volume)
|
||||
levelFrameCountRef.current += 1
|
||||
|
||||
if (levelFrameCountRef.current === 1) {
|
||||
pushAudioDebug(`Premier niveau audio reçu=${volume.toFixed(4)}`)
|
||||
} else if (levelFrameCountRef.current % 50 === 0) {
|
||||
pushAudioDebug(`Niveau audio=${volume.toFixed(4)}`)
|
||||
}
|
||||
|
||||
if (volume >= SPEECH_START_THRESHOLD) {
|
||||
if (!hasSpeechInSegmentRef.current) {
|
||||
@@ -465,8 +470,6 @@ export default function App() {
|
||||
} else if (volume > SILENCE_THRESHOLD) {
|
||||
silenceStartedAtRef.current = null
|
||||
}
|
||||
|
||||
animationFrameRef.current = requestAnimationFrame(monitorMicrophone)
|
||||
}
|
||||
|
||||
async function activateAutoListening() {
|
||||
@@ -495,22 +498,31 @@ export default function App() {
|
||||
analyser.fftSize = 2048
|
||||
analyser.smoothingTimeConstant = 0.85
|
||||
const sourceNode = audioContext.createMediaStreamSource(stream)
|
||||
const processorNode = audioContext.createScriptProcessor(2048, 1, 1)
|
||||
const monitorGain = audioContext.createGain()
|
||||
monitorGain.gain.value = 0
|
||||
processorNode.onaudioprocess = (event) => {
|
||||
const channelData = event.inputBuffer.getChannelData(0)
|
||||
const volume = calculateVolume(channelData)
|
||||
handleMicrophoneLevel(volume)
|
||||
}
|
||||
|
||||
sourceNode.connect(analyser)
|
||||
sourceNode.connect(processorNode)
|
||||
processorNode.connect(monitorGain)
|
||||
analyser.connect(monitorGain)
|
||||
monitorGain.connect(audioContext.destination)
|
||||
|
||||
audioContextRef.current = audioContext
|
||||
analyserRef.current = analyser
|
||||
sourceNodeRef.current = sourceNode
|
||||
processorNodeRef.current = processorNode
|
||||
monitorGainRef.current = monitorGain
|
||||
mediaStreamRef.current = stream
|
||||
setIsAutoListening(true)
|
||||
setVoiceStatus('Micro actif. Parle quand tu veux, j’enverrai après 2,5 s de silence.')
|
||||
pushAudioDebug(`Piste micro active=${stream.getAudioTracks()[0]?.readyState || 'inconnue'}`)
|
||||
await startSegmentRecording()
|
||||
monitorMicrophone()
|
||||
} catch {
|
||||
pushAudioDebug('Accès micro refusé ou indisponible')
|
||||
setVoiceStatus('Accès au micro refusé ou indisponible.')
|
||||
|
||||
Reference in New Issue
Block a user