Created
May 22, 2023 13:09
-
-
Save ali-sabry/38ad9dba94f51b270096f118b81629ac to your computer and use it in GitHub Desktop.
This custom React hook lets users control the application with voice commands via the Web Speech API.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { useState, useEffect } from "react"; | |
/**
 * Custom hook that listens for spoken voice commands via the Web Speech API.
 *
 * @param {string[]} commands - Phrases to watch for in the live transcript.
 * @param {(command: string) => void} callback - Invoked with the first command
 *   found in the transcript; recognition is stopped before the call.
 * @returns {{
 *   transcript: string,
 *   error: string,
 *   status: "idle" | "listening",
 *   startListening: () => void,
 *   stopListening: () => void
 * }}
 */
function useSpeechRecognition(commands, callback) {
  const [recognition, setRecognition] = useState(null);
  const [transcript, setTranscript] = useState("");
  const [error, setError] = useState("");
  const [status, setStatus] = useState("idle");

  // Create the recognition object once on mount.
  useEffect(() => {
    // Feature-detect the (possibly vendor-prefixed) SpeechRecognition API.
    if (window.SpeechRecognition || window.webkitSpeechRecognition) {
      const newRecognition = new (window.SpeechRecognition ||
        window.webkitSpeechRecognition)();
      newRecognition.continuous = true; // keep listening until stopped
      newRecognition.interimResults = true; // surface partial results as they arrive
      newRecognition.lang = "en-US";
      setRecognition(newRecognition);
    } else {
      setError("Speech recognition is not supported by this browser.");
    }
  }, []);

  // (Re)attach event listeners whenever recognition, commands, or callback change.
  useEffect(() => {
    if (!recognition) return;

    const handleStart = () => setStatus("listening");
    const handleEnd = () => setStatus("idle");

    const handleResult = (e) => {
      // Concatenate the transcript of every result since the last event.
      let newTranscript = "";
      for (let i = e.resultIndex; i < e.results.length; i++) {
        newTranscript += e.results[i][0].transcript;
      }
      setTranscript(newTranscript);
      // Fire the callback on the first command contained in the transcript.
      for (const command of commands) {
        if (newTranscript.includes(command)) {
          recognition.stop();
          callback(command);
          break;
        }
      }
    };

    const handleError = (e) => setError(e.error);

    recognition.addEventListener("start", handleStart);
    recognition.addEventListener("end", handleEnd);
    recognition.addEventListener("result", handleResult);
    recognition.addEventListener("error", handleError);

    // Cleanup: detach all listeners so stale handlers never fire.
    return () => {
      recognition.removeEventListener("start", handleStart);
      recognition.removeEventListener("end", handleEnd);
      recognition.removeEventListener("result", handleResult);
      recognition.removeEventListener("error", handleError);
    };
    // FIX: `commands` and `callback` were previously missing from this
    // dependency list, so the handlers captured stale values whenever the
    // caller passed a new array or function.
  }, [recognition, commands, callback]);

  const startListening = () => {
    if (recognition && status === "idle") {
      recognition.start();
    }
  };

  const stopListening = () => {
    if (recognition && status === "listening") {
      recognition.stop();
    }
  };

  return { transcript, error, status, startListening, stopListening };
}
const App = () => { | |
// define a list of commands that are valid for controlling the app | |
const commands = ["play", "pause", "stop", "next", "previous"]; | |
// define a callback function that will be invoked when a command is recognized | |
const handleCommand = (command) => { | |
// perform some action based on the command | |
switch (command) { | |
case "play": | |
// play the audio or video | |
break; | |
case "pause": | |
// pause the audio or video | |
break; | |
case "stop": | |
// stop the audio or video | |
break; | |
case "next": | |
// play the next track or video | |
break; | |
case "previous": | |
// play the previous track or video | |
break; | |
default: | |
// do nothing | |
break; | |
} | |
}; | |
// use the custom hook with the commands and the callback function as arguments | |
const { transcript, error, status, | |
startListening, stopListening } = useSpeechRecognition(commands, handleCommand); | |
return ( | |
<> | |
<h1>Speech Recognition Example</h1> | |
<p>You can control the app with voice commands: play, pause, stop, next, previous.</p> | |
<p>Transcript: {transcript}</p> | |
<p>Error: {error}</p> | |
<p>Status: {status}</p> | |
<button onClick={startListening}>Start listening</button> | |
<button onClick={stopListening}>Stop listening</button> | |
</> | |
); | |
}; | |
export default App; |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment