Add voice

This commit is contained in:
2026-04-05 09:59:56 +02:00
parent 887e9919a1
commit 1ca69e94c1

View File

@@ -35,6 +35,17 @@ function ProgressCard({ item }) {
)
}
function getSupportedRecordingMimeType() {
  // Pick the first audio container/codec this browser's MediaRecorder can
  // produce; return '' when recording is unsupported entirely.
  if (typeof MediaRecorder === 'undefined') return ''
  const preferred = [
    'audio/webm;codecs=opus',
    'audio/webm',
    'audio/mp4',
    'audio/ogg;codecs=opus',
  ]
  for (const candidate of preferred) {
    if (MediaRecorder.isTypeSupported(candidate)) return candidate
  }
  return ''
}
export default function App() {
const [students, setStudents] = useState([])
const [selectedStudentId, setSelectedStudentId] = useState('')
@@ -45,10 +56,17 @@ export default function App() {
const [assessment, setAssessment] = useState(null)
const [assessmentAnswer, setAssessmentAnswer] = useState('')
const [speaking, setSpeaking] = useState(false)
const [isRecording, setIsRecording] = useState(false)
const [isTranscribing, setIsTranscribing] = useState(false)
const [voiceStatus, setVoiceStatus] = useState('')
const recognitionRef = useRef(null)
const mediaRecorderRef = useRef(null)
const mediaStreamRef = useRef(null)
const recordedChunksRef = useRef([])
const recordingMimeTypeRef = useRef('')
const selectedStudent = useMemo(
() => students.find((s) => String(s.id) === String(selectedStudentId)),
() => students.find((student) => String(student.id) === String(selectedStudentId)),
[students, selectedStudentId]
)
@@ -62,6 +80,15 @@ export default function App() {
}
}, [selectedStudentId])
useEffect(() => {
return () => {
if (recognitionRef.current) {
recognitionRef.current.abort()
}
stopMediaStream()
}
}, [])
async function loadStudents() {
const res = await fetch(`${API_BASE}/students`)
const data = await res.json()
@@ -157,12 +184,113 @@ export default function App() {
window.speechSynthesis.speak(utterance)
}
function startVoiceInput() {
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition
if (!Recognition) {
alert('La reconnaissance vocale du navigateur n\'est pas disponible ici.')
function stopMediaStream() {
  // Release the microphone and reset every recording-related ref so the next
  // dictation starts from a clean slate.
  const stream = mediaStreamRef.current
  if (stream) {
    for (const track of stream.getTracks()) {
      track.stop()
    }
    mediaStreamRef.current = null
  }
  mediaRecorderRef.current = null
  recordedChunksRef.current = []
  recordingMimeTypeRef.current = ''
}
async function transcribeRecording(audioBlob, mimeType) {
  // Upload the recorded clip to the backend transcription endpoint and load
  // the recognized text into the chat input; surface progress via voiceStatus.
  let extension = 'webm'
  if (mimeType.includes('mp4')) {
    extension = 'mp4'
  } else if (mimeType.includes('ogg')) {
    extension = 'ogg'
  }
  const audioFile = new File([audioBlob], `voice-input.${extension}`, {
    type: mimeType || 'audio/webm',
  })
  const formData = new FormData()
  formData.append('file', audioFile)
  setIsTranscribing(true)
  setVoiceStatus('Transcription en cours...')
  try {
    const res = await fetch(`${API_BASE}/transcribe`, { method: 'POST', body: formData })
    if (!res.ok) {
      // Body may not be JSON on server errors; fall back to a generic message.
      const error = await res.json().catch(() => ({}))
      throw new Error(error.detail || 'La transcription a échoué.')
    }
    const data = await res.json()
    setInput(data.text || '')
    setVoiceStatus(data.text ? 'Texte dicté prêt à être envoyé.' : 'Aucun texte reconnu.')
  } catch (error) {
    setVoiceStatus(error.message || 'Impossible de transcrire cet enregistrement.')
  } finally {
    setIsTranscribing(false)
  }
}
async function startRecordedVoiceInput() {
  // Record microphone audio with MediaRecorder, then hand the blob to
  // transcribeRecording() once the user stops the capture.
  // Fix: several user-facing strings had lost their apostrophes
  // ("nest" -> "n'est", "lenregistrement" -> "l'enregistrement") — the
  // character was stripped/mis-encoded.
  if (!navigator.mediaDevices?.getUserMedia || typeof MediaRecorder === 'undefined') {
    setVoiceStatus("La dictée vocale n'est pas prise en charge par ce navigateur.")
    return
  }
  const mimeType = getSupportedRecordingMimeType()
  if (!mimeType) {
    setVoiceStatus("Aucun format audio compatible n'est disponible dans ce navigateur.")
    return
  }
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
    const recorder = new MediaRecorder(stream, { mimeType })
    mediaStreamRef.current = stream
    mediaRecorderRef.current = recorder
    recordedChunksRef.current = []
    recordingMimeTypeRef.current = mimeType
    recorder.ondataavailable = (event) => {
      // Chunks can arrive empty; keep only real payloads.
      if (event.data && event.data.size > 0) {
        recordedChunksRef.current.push(event.data)
      }
    }
    recorder.onstop = async () => {
      // Capture the mime type before stopMediaStream() clears the refs.
      const finalMimeType = recordingMimeTypeRef.current || mimeType
      const audioBlob = new Blob(recordedChunksRef.current, { type: finalMimeType })
      stopMediaStream()
      setIsRecording(false)
      if (audioBlob.size > 0) {
        await transcribeRecording(audioBlob, finalMimeType)
      } else {
        setVoiceStatus('Aucun son détecté. Réessaie en parlant plus près du micro.')
      }
    }
    recorder.onerror = () => {
      setVoiceStatus("Le navigateur a rencontré une erreur pendant l'enregistrement.")
      setIsRecording(false)
      stopMediaStream()
    }
    recorder.start()
    setIsRecording(true)
    setVoiceStatus('Enregistrement en cours... clique à nouveau pour arrêter.')
  } catch {
    // getUserMedia rejects when permission is denied or no input device exists.
    setVoiceStatus('Accès au micro refusé ou indisponible.')
    setIsRecording(false)
    stopMediaStream()
  }
}
function stopRecordedVoiceInput() {
  // Stop an in-flight recording; the recorder's onstop handler takes over
  // (builds the blob, releases the stream, triggers transcription).
  // Fix: restored the stripped apostrophe in "l'enregistrement".
  const recorder = mediaRecorderRef.current
  if (recorder && recorder.state !== 'inactive') {
    recorder.stop()
    setVoiceStatus("Finalisation de l'enregistrement...")
  }
}
function startBrowserRecognition() {
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition
if (!Recognition) return false
const recognition = new Recognition()
recognition.lang = 'fr-FR'
recognition.interimResults = false
@@ -170,17 +298,36 @@ export default function App() {
recognition.onresult = (event) => {
const transcript = event.results[0][0].transcript
setInput(transcript)
setVoiceStatus('Texte dicté prêt à être envoyé.')
}
recognition.onerror = () => {
setVoiceStatus('La reconnaissance vocale du navigateur a échoué. Essaie lenregistrement audio.')
}
recognition.onstart = () => {
setVoiceStatus('Écoute en cours...')
}
recognition.onerror = () => {}
recognitionRef.current = recognition
recognition.start()
return true
}
async function startVoiceInput() {
  // Mic button handler: a second click while recording stops the capture.
  if (isRecording) {
    stopRecordedVoiceInput()
    return
  }
  // Prefer the browser's native speech recognition; fall back to
  // MediaRecorder + server-side transcription when it is unavailable.
  if (!startBrowserRecognition()) {
    await startRecordedVoiceInput()
  }
}
return (
<div className="layout">
<aside className="sidebar card">
<h2>Élève</h2>
<select value={selectedStudentId} onChange={(e) => setSelectedStudentId(e.target.value)}>
<select value={selectedStudentId} onChange={(event) => setSelectedStudentId(event.target.value)}>
<option value="">Choisir un élève</option>
{students.map((student) => (
<option key={student.id} value={student.id}>
@@ -193,16 +340,16 @@ export default function App() {
<input
placeholder="Prénom"
value={form.first_name}
onChange={(e) => setForm({ ...form, first_name: e.target.value })}
onChange={(event) => setForm({ ...form, first_name: event.target.value })}
/>
<input
type="number"
min="8"
max="12"
value={form.age}
onChange={(e) => setForm({ ...form, age: e.target.value })}
onChange={(event) => setForm({ ...form, age: event.target.value })}
/>
<select value={form.grade} onChange={(e) => setForm({ ...form, grade: e.target.value })}>
<select value={form.grade} onChange={(event) => setForm({ ...form, grade: event.target.value })}>
<option>CE2</option>
<option>CM1</option>
<option>CM2</option>
@@ -249,7 +396,7 @@ export default function App() {
<p>{assessment.question}</p>
<input
value={assessmentAnswer}
onChange={(e) => setAssessmentAnswer(e.target.value)}
onChange={(event) => setAssessmentAnswer(event.target.value)}
placeholder="Ta réponse"
/>
<button type="submit">Valider</button>
@@ -259,13 +406,17 @@ export default function App() {
<form onSubmit={sendMessage} className="composer">
<input
value={input}
onChange={(e) => setInput(e.target.value)}
onChange={(event) => setInput(event.target.value)}
placeholder="Pose une question ou demande une explication..."
/>
<button type="button" onClick={startVoiceInput}>🎤 Dicter</button>
<button type="button" onClick={startVoiceInput} disabled={isTranscribing}>
{isRecording ? 'Arrêter' : isTranscribing ? 'Transcription...' : 'Dicter'}
</button>
<button type="submit">Envoyer</button>
</form>
{voiceStatus && <p className="muted voice-status">{voiceStatus}</p>}
</main>
</div>
)
}
}