Week 12. 26/10/2020
This week I had to do:
- Optimize the Acoustic Teleoperator, including a vumeter –> I have some problems with the vumeter in my audio.js code. The audio stream my recognizer (Teachable Machine) uses is different from the vumeter stream, and when I want the recognizer to stop its recognition, it doesn't work; I don't know why. I tried other options, such as setTimeout(function recognizer, seconds) to wait some seconds and then call recognizer.stopListening(), but that doesn't work either, and with other options the console sometimes showed me the same error. (There is a sketch of one possible fix after the audio.js code below.)
Here I explain the problems:
This is my audio.js code:
document.addEventListener('robot-loaded', (evt) => {
  localRobot = evt.detail;
  console.log(localRobot);
});
// more documentation available at
// https://github.com/tensorflow/tfjs-models/tree/master/speech-commands
// the link to your model provided by Teachable Machine export panel
const URL = "https://teachablemachine.withgoogle.com/models/P3XdF5r5d/";
var connection = false;
async function createModel() {
  const checkpointURL = URL + "model.json"; // model topology
  const metadataURL = URL + "metadata.json"; // model metadata
  const recognizer = speechCommands.create(
    "BROWSER_FFT", // fourier transform type, not useful to change
    undefined, // speech commands vocabulary feature, not useful for your models
    checkpointURL,
    metadataURL);
  // check that model and metadata are loaded via HTTPS requests.
  await recognizer.ensureModelLoaded();
  return recognizer;
}
function analizar(recognizer, classLabels, word, average) {
  if (Math.round(average - 40) <= 0 && recognizer.isListening()) {
    console.log('STOP RECOGNIZING');
    recognizer.stopListening();
  } else if (Math.round(average - 40) > 0) {
    recognizer.listen(result => {
      const scores = result.scores; // probability of prediction for each class
      var word_index = 0;
      // render the probability scores per class
      for (let i = 0; i < classLabels.length; i++) {
        const classPrediction = classLabels[i] + ": " + result.scores[i].toFixed(2);
        //labelContainer.childNodes[i].innerHTML = classPrediction;
        // The most probable word
        if (result.scores[i].toFixed(2) >= result.scores[word_index].toFixed(2)) {
          word_index = i;
        }
      }
      var prediction = classLabels[word_index];
      word.innerHTML = "The most probable word: " + prediction;
      if (connection) {
        if (String(prediction) == "Go") {
          localRobot.setV(0.9);
          // make the robot move
        } else if (String(prediction) == "Stop") {
          localRobot.setV(0);
          localRobot.setW(0);
          localRobot.setL(0);
          // make it stop
        } else if (String(prediction) == "Back") {
          localRobot.setV(-0.9);
        } else if (String(prediction) == "Right") {
          localRobot.setW(-0.005);
        } else if (String(prediction) == "Left") {
          localRobot.setW(0.005);
        } else if (String(prediction) == "Up") {
          localRobot.setL(0.25);
        } else if (String(prediction) == "Down") {
          console.log(localRobot.velocity);
          if (localRobot.velocity.y == 0.25) { // comparison, not assignment
            localRobot.setL(-0.25);
          }
        }
        // To stop going down, going up, turning left or turning right, say the word: STOP
      }
      console.log(prediction);
    }, {
      includeSpectrogram: true, // in case listen should return result.spectrogram
      probabilityThreshold: 0.75,
      invokeCallbackOnNoiseAndUnknown: true,
      overlapFactor: 0.50 // probably want between 0.5 and 0.75. More info in README
    });
  }
}
async function listen() {
  navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia;
  if (navigator.getUserMedia) {
    //--------TEACHABLE MACHINE--------------
    const recognizer = await createModel();
    const classLabels = recognizer.wordLabels(); // get class labels
    //const labelContainer = document.getElementById("label-container");
    const word = document.getElementById("prediction");
    // for (let i = 0; i < classLabels.length; i++) {
    //   labelContainer.appendChild(document.createElement("div"));
    // }
    // listen() takes two arguments:
    // 1. A callback function that is invoked anytime a word is recognized.
    // 2. A configuration object with adjustable fields
    //-------------------
    navigator.getUserMedia({
      audio: true
    },
    function(stream) {
      audioContext = new AudioContext();
      analyser = audioContext.createAnalyser();
      microphone = audioContext.createMediaStreamSource(stream);
      javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
      analyser.smoothingTimeConstant = 0.8;
      analyser.fftSize = 1024;
      microphone.connect(analyser);
      analyser.connect(javascriptNode);
      javascriptNode.connect(audioContext.destination);
      canvasContext = document.getElementById("canvas").getContext("2d");
      javascriptNode.onaudioprocess = function() {
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var values = 0;
        var length = array.length;
        for (var i = 0; i < length; i++) {
          values += (array[i]);
        }
        var average = values / length;
        analizar(recognizer, classLabels, word, average);
        canvasContext.clearRect(0, 0, 150, 300);
        canvasContext.fillStyle = '#BadA55';
        canvasContext.fillRect(0, 300 - average, 150, 300);
        canvasContext.fillStyle = '#262626';
        canvasContext.font = "48px impact";
        canvasContext.fillText(Math.round(average - 40), -2, 300);
      }; // end fn stream
    },
    function(err) {
      console.log("The following error occurred: " + err.name);
    });
  } else {
    console.log("getUserMedia not supported");
  }
}
async function Connect_To_Robot() {
  if (connection) {
    connection = false;
    document.getElementById("connection").style.backgroundColor = "red";
    console.log("Disconnected");
  } else {
    connection = true;
    console.log("CONNECTED TO THE ROBOT");
    document.getElementById("connection").style.backgroundColor = "green";
  }
}
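This is a minimal sketch (untested with the robot) of one idea I want to try: call recognizer.listen() only once, and use the vumeter level just to decide whether a prediction is acted on. My guess is that calling recognizer.listen() again on every audio frame, while it is already streaming, is what produces the repeated console error. The names SILENCE_OFFSET, loudEnough and startRecognizer are my own for this example:

// Sketch (untested): start the recognizer once, gate commands with the vumeter level.
const SILENCE_OFFSET = 40;   // same offset used when drawing the vumeter number
let loudEnough = false;      // written by the vumeter, read by the recognizer callback

async function startRecognizer(word) {
  const recognizer = await createModel();
  const classLabels = recognizer.wordLabels();
  // listen() is called exactly once for the whole session
  recognizer.listen(result => {
    if (!loudEnough) return;   // ignore predictions while the microphone is quiet
    // pick the most probable class
    let wordIndex = 0;
    for (let i = 0; i < classLabels.length; i++) {
      if (result.scores[i] > result.scores[wordIndex]) wordIndex = i;
    }
    const prediction = classLabels[wordIndex];
    word.innerHTML = "The most probable word: " + prediction;
    // ...same robot commands as in analizar()...
  }, {
    probabilityThreshold: 0.75,
    invokeCallbackOnNoiseAndUnknown: true,
    overlapFactor: 0.50
  });
  return recognizer;
}

// Inside javascriptNode.onaudioprocess, after computing "average":
//   loudEnough = Math.round(average - SILENCE_OFFSET) > 0;

With this approach, stopListening() would only be needed once, when I really want to shut the recognizer down.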
- Gaming in my D3: record a video showing the Async & Sync competitive exercises working, for example the F1 competition. –> We changed this goal because the competitions server does not work after the update of the web workers; instead, I had to study the code of the web server and try to get it working. –> I have been trying to understand the code. This week I will try to get it going.
Next week's goals are:
- Optimize the Acoustic Teleoperator including a vumeter / study Teachable Machine and how recognizer.listen() works (see the small test sketch after this list)
- Server for Gaming in my D3: Async competitive exercises
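As a starting point for studying how recognizer.listen() works, this is the kind of small test I plan to run for the "stop after a few seconds" idea. It is only a sketch, untested, and assumes stopListening() can simply be called later from a timer once the recognizer is streaming:

// Sketch (untested): start listening, then stop it automatically after 10 seconds.
async function testListenAndStop() {
  const recognizer = await createModel();
  recognizer.listen(result => {
    console.log(result.scores);   // just print the raw scores for now
  }, { probabilityThreshold: 0.75 });

  // setTimeout needs a function reference, not a call; stop after 10 000 ms
  setTimeout(() => {
    if (recognizer.isListening()) {
      recognizer.stopListening();
      console.log("recognizer stopped");
    }
  }, 10000);
}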