tutorial, new prompts

pull/1/head
Michael Nguyen 2018-10-03 16:20:50 -05:00
parent 0002dbf10c
commit 0e28b183e4
20 changed files with 30532 additions and 226 deletions

5
.gitignore vendored

@ -25,8 +25,9 @@ yarn-error.log*
# my ignores
*.json
db/
prompts/
audio_files/
tmp/
.vscode
logs
logs
__pycache__
build/


@ -0,0 +1,18 @@
# Mimic Recording Studio
Welcome to the Mimic Recording Studio.
## Quick Start
### Dependencies
* [Docker](https://docs.docker.com/) (community edition is fine)
* [Docker-compose](https://docs.docker.com/compose/install/)
### Build and Run
* `git clone `
* `cd mimic-recording-studio`
* `docker-compose up`
* In a browser, go to `http://localhost:3000`
**Note**
The first run will require Docker to download and build the images, which can take some time.
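A quick way to confirm the stack is up once `docker-compose up` has settled (a minimal sketch, assuming Python 3 and the `requests` package on the host; the URL is the one listed above):

```python
# Poll the frontend until the first docker-compose build has finished serving it.
import time
import requests

URL = "http://localhost:3000"

for _ in range(60):                      # wait up to ~5 minutes
    try:
        if requests.get(URL, timeout=5).status_code == 200:
            print("Mimic Recording Studio is up at", URL)
            break
    except requests.ConnectionError:
        pass
    time.sleep(5)
else:
    print("Still not reachable; check `docker-compose logs`")
```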


@ -1,100 +1 @@
from flask import Flask, jsonify, request
from flask.views import MethodView
from flask_cors import CORS
from .api import UserAPI, PromptAPI, AudioAPI
app = Flask(__name__)
CORS(app)
user_api = UserAPI()
audio_api = AudioAPI()
prompt_api = PromptAPI()
class Users(MethodView):
def get(self):
uuid = request.args.get('uuid')
user = user_api.get_user(uuid)
if user.success:
return jsonify(success=True, message="success", data=user.data)
else:
return jsonify(success=False, message=user.message)
def post(self):
user = request.get_json(force=True)
res = user_api.save_user(user)
if res.success:
return jsonify(success=True, message="succesfully saved user")
else:
return jsonify(success=False, message=res.message)
class Audio(MethodView):
def save_audio(self, uuid: str, prompt: str, data: bytes) -> jsonify:
res = audio_api.save_audio(data, uuid, prompt)
if res.success:
return jsonify(success=True, message="sucessfully saved audio")
else:
return jsonify(
success=False,
message="did not sucessfully save audio"
)
def get_audio_len(self, data: bytes) -> jsonify:
res = audio_api.get_audio_len(data)
if res.success:
return jsonify(success=True, data=res.data)
else:
return jsonify(success=False, message="error occured in server")
def post(self):
data = request.data
uuid = request.args.get('uuid')
prompt = request.args.get('prompt')
get_len = request.args.get('get_len')
print("got it")
if uuid and prompt:
return self.save_audio(uuid, prompt, data)
elif uuid and get_len:
return self.get_audio_len(data)
else:
return jsonify(
success=False,
message="missing prompt or uuid query param"
)
class Prompts(MethodView):
def get(self):
uuid = request.args.get('uuid')
prompts = prompt_api.get_prompt(uuid)
if prompts.success:
return jsonify(success=True, data=prompts.data)
else:
return jsonify(success=False, messsage="failed to get prompt")
# registering apis
user_view = Users.as_view('user')
app.add_url_rule(
'/api/user/',
view_func=user_view,
methods=['POST', 'GET']
)
audio_view = Audio.as_view('audio')
app.add_url_rule(
'/api/audio/',
view_func=audio_view,
methods=['POST', 'GET']
)
prompt_view = Prompts.as_view('prompt')
app.add_url_rule(
'/api/prompt/',
view_func=prompt_view,
methods=['GET']
)
from .app import app


@ -1,7 +1,7 @@
import os
from .db import DB
from .protocol import response
from .file_system import AudioFS, temp_path
from .file_system import AudioFS, PromptsFS, temp_path
from .audio import Audio
import random
@ -80,6 +80,8 @@ class AudioAPI:
class PromptAPI:
"""API to get prompts"""
prompt_fs = PromptsFS()
def __init__(self):
self.user_api = UserAPI()
@ -87,7 +89,8 @@ class PromptAPI:
user = self.user_api.get_user(uuid)
if user.success:
prompt_num = user.data["prompt_num"]
res = DB.get_prompt(prompt_num)
# res = DB.get_prompt(prompt_num)
res = PromptAPI.prompt_fs.get(prompt_num)
if res.success:
return response(True, data=res.data)
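Both the old database path and the new filesystem path hand results back through the shared `response` wrapper imported from the backend's `protocol.py`. That module is not part of this diff; the following is a hypothetical sketch of what it provides, inferred only from how `response(...)`, `.success`, `.data`, and `.message` are used in the code above:

```python
# Hypothetical reconstruction of the backend's protocol.py (not shown in this
# commit); the field names are inferred from the call sites above.
from collections import namedtuple

Response = namedtuple("Response", ["success", "data", "message"])

def response(success: bool, data=None, message: str = "") -> Response:
    """Uniform result object passed from the API layer to the Flask views."""
    return Response(success, data, message)

# e.g. PromptAPI.get_prompt ends up returning something shaped like
# response(True, data={"prompt": "<text>", "total_prompt": <count>})
```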

100
backend/app/app.py Normal file

@ -0,0 +1,100 @@
from flask import Flask, jsonify, request
from flask.views import MethodView
from flask_cors import CORS
from .api import UserAPI, PromptAPI, AudioAPI
app = Flask(__name__)
CORS(app)
user_api = UserAPI()
audio_api = AudioAPI()
prompt_api = PromptAPI()
class Users(MethodView):
def get(self):
uuid = request.args.get('uuid')
user = user_api.get_user(uuid)
if user.success:
return jsonify(success=True, message="success", data=user.data)
else:
return jsonify(success=False, message=user.message)
def post(self):
user = request.get_json(force=True)
res = user_api.save_user(user)
if res.success:
return jsonify(success=True, message="succesfully saved user")
else:
return jsonify(success=False, message=res.message)
class Audio(MethodView):
def save_audio(self, uuid: str, prompt: str, data: bytes) -> jsonify:
res = audio_api.save_audio(data, uuid, prompt)
if res.success:
return jsonify(success=True, message="sucessfully saved audio")
else:
return jsonify(
success=False,
message="did not sucessfully save audio"
)
def get_audio_len(self, data: bytes) -> jsonify:
res = audio_api.get_audio_len(data)
if res.success:
return jsonify(success=True, data=res.data)
else:
return jsonify(success=False, message="error occured in server")
def post(self):
data = request.data
uuid = request.args.get('uuid')
prompt = request.args.get('prompt')
get_len = request.args.get('get_len')
print("got it")
if uuid and prompt:
return self.save_audio(uuid, prompt, data)
elif uuid and get_len:
return self.get_audio_len(data)
else:
return jsonify(
success=False,
message="missing prompt or uuid query param"
)
class Prompts(MethodView):
def get(self):
uuid = request.args.get('uuid')
prompts = prompt_api.get_prompt(uuid)
if prompts.success:
return jsonify(success=True, data=prompts.data)
else:
return jsonify(success=False, message="failed to get prompt")
# registering apis
user_view = Users.as_view('user')
app.add_url_rule(
'/api/user/',
view_func=user_view,
methods=['POST', 'GET']
)
audio_view = Audio.as_view('audio')
app.add_url_rule(
'/api/audio/',
view_func=audio_view,
methods=['POST', 'GET']
)
prompt_view = Prompts.as_view('prompt')
app.add_url_rule(
'/api/prompt/',
view_func=prompt_view,
methods=['GET']
)
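For reference, the routes registered above can be exercised with any HTTP client. A rough sketch (the backend address, Flask's default port 5000, and the exact user JSON fields are assumptions; the paths and query parameters come straight from the code above):

```python
# Client-side sketch of the three endpoints; the base URL and user fields are
# assumed, the paths and query params are taken from app.py above.
import requests

BASE = "http://localhost:5000"   # assumed backend address
UUID = "00000000-0000-0000-0000-000000000000"

# Create a user; the frontend posts a uuid plus a display name.
requests.post(f"{BASE}/api/user/", json={"uuid": UUID, "name": "Alice"})

# Fetch the current prompt for this user.
prompt = requests.get(f"{BASE}/api/prompt/", params={"uuid": UUID}).json()
print(prompt["data"]["prompt"], prompt["data"]["total_prompt"])

# Save a recording for that prompt (raw audio bytes in the request body) ...
with open("recording.wav", "rb") as f:
    audio = f.read()
requests.post(
    f"{BASE}/api/audio/",
    params={"uuid": UUID, "prompt": prompt["data"]["prompt"]},
    data=audio,
)

# ... or only ask the backend to measure the clip's length.
print(requests.post(
    f"{BASE}/api/audio/",
    params={"uuid": UUID, "get_len": "1"},
    data=audio,
).json())
```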


@ -5,7 +5,6 @@
import os
import datetime
from .protocol import response
from .file_system import PromptsFS
from .audio import Audio
from peewee import (
Model, SqliteDatabase, CharField, IntegerField,
@ -74,8 +73,6 @@ class AudioModel(Model):
mimic_studio_db.connect()
mimic_studio_db.create_tables([UserModel, AudioModel])
prompt_fs = PromptsFS()
class DB:
"""DB layer"""
@ -149,14 +146,14 @@ class DB:
print(e)
return response(False, message="Exception thrown, check logs")
# TODO: should we add prompt in database?
@staticmethod
def get_prompt(prompt_num: int) -> response:
prompt = prompt_fs.get(prompt_num)
if prompt:
data = {
"prompt": prompt
}
return response(True, data=data)
else:
return response(False)
# # TODO: should we add prompt in database?
# @staticmethod
# def get_prompt(prompt_num: int) -> response:
# prompt = prompt_fs.get(prompt_num)
# if prompt:
# data = {
# "prompt": prompt
# }
# return response(True, data=data)
# else:
# return response(False)


@ -5,6 +5,7 @@ import csv
import hashlib
import subprocess
from subprocess import DEVNULL
from .protocol import response
prompts_dir = prompts_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
@ -13,7 +14,7 @@ prompts_dir = prompts_path = os.path.join(
os.makedirs(prompts_dir, exist_ok=True)
prompts_path = os.path.join(
prompts_dir,
"../prompts/english_prompts.csv"
"../prompts/english_prompts_v2.csv"
)
audio_dir = os.path.join(
@ -75,11 +76,15 @@ class PromptsFS:
with open(prompts_path, 'r') as f:
prompts = csv.reader(f, delimiter="\t")
for p in prompts:
self.data.append(p[2])
self.data.append(p[0])
def get(self, prompt_number: int) -> str:
def get(self, prompt_number: int) -> response:
try:
return self.data[prompt_number]
d = {
"prompt": self.data[prompt_number],
"total_prompt": len(self.data)
}
return response(True, data=d)
except IndexError as e:
# TODO: logging
print(e)
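The prompt file is read as tab-separated text with the prompt in the first column in the v2 layout (the v1 file kept it in the third column, hence the switch from `p[2]` to `p[0]`). A tiny self-contained illustration of that parsing, using made-up file contents:

```python
# Illustration only: parse a throwaway v2-style prompts file (tab-separated,
# prompt text in column 0) the same way PromptsFS does.
import csv
import io

fake_file = "The human voice is the most perfect instrument of all.\nHello world.\n"
data = [row[0] for row in csv.reader(io.StringIO(fake_file), delimiter="\t")]

prompt_number = 1
print({"prompt": data[prompt_number], "total_prompt": len(data)})
# -> {'prompt': 'Hello world.', 'total_prompt': 2}
```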

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
{
"name": "rebuild_new_recording_studio",
"name": "mimic-recording-studio",
"version": "0.1.0",
"private": true,
"devDependencies": {

Binary file not shown (before: 3.8 KiB).


@ -9,7 +9,7 @@
homescreen on Android. See https://developers.google.com/web/fundamentals/engage-and-retain/web-app-manifest/
-->
<link rel="manifest" href="%PUBLIC_URL%/manifest.json">
<link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico">
<link rel="shortcut icon" href="%PUBLIC_URL%/mycroft-favicon.ico">
<!-- style links for icons and font -->
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:400,500,700,900">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.2.0/css/all.css" integrity="sha384-hWVjflwFxL6sNzntih27bfxkr27PmbbK/iSvJ+a4+0owXq79v+lsFkW54bOGbiDQ" crossorigin="anonymous">
@ -23,7 +23,7 @@
work correctly both with client-side routing and a non-root public URL.
Learn how to configure a non-root public URL by running `npm run build`.
-->
<title>React App</title>
<title>Mimic Recording Studio</title>
</head>
<body>
<noscript>

Binary file not shown (after: 15 KiB).


@ -63,7 +63,7 @@ h2 {
font-size: 1em;
font-variant: small-caps;
/* text-transform: uppercase; */
font-weight: 500;
font-weight: 700;
/* display: inline; */
}
@ -432,33 +432,57 @@ input::placeholder {
}
.feedback-ball-red {
background-color: red;
width: 30px;
background-color: rgb(228, 63, 70);
width: 100px;
height: 30px;
border-radius: 50%;
border-radius: 5px;
position: absolute;
top: 10px;
right: 10px;
font-size: large;
color: white;
}
.feedback-ball-green {
background-color: green;
width: 30px;
background-color: rgb(37, 162, 78);
width: 100px;
height: 30px;
border-radius: 50%;
border-radius: 5px;
position: absolute;
top: 10px;
right: 10px;
font-size: large;
color: white;
}
.feedback-ball-grey {
/* .feedback-ball-grey {
background-color: grey;
width: 30px;
width: 100px;
height: 30px;
border-radius: 50%;
border-radius: 5px;
position: absolute;
top: 10px;
right: 10px;
} */
.feedback-ball-red-t {
background-color: rgb(228, 63, 70);
width: 100px;
height: 30px;
border-radius: 5px;
font-size: large;
color: white;
text-align: center;
}
.feedback-ball-green-t {
background-color: rgb(37, 162, 78);
width: 100px;
height: 30px;
border-radius: 5px;
font-size: large;
color: white;
text-align: center;
}
#instructions {
@ -468,6 +492,14 @@ input::placeholder {
width: 20vw;
}
.grid-layout {
display: grid;
grid-template-columns: auto auto auto;
align-items: center;
justify-items: center;
margin: 2em 0 2em 0;
}
/* Holds recording widgets */
#container {
@ -572,3 +604,13 @@ canvas {
margin-right: 8px;
/* margin-bottom: 40px; */
}
/* Tutorial Page */
.tutorial {
/* max-width: 100%; */
margin: 20px 0px 40px 0px;
padding: 40px 50px 40px 50px;
background-color: #f1f3f4;
border-radius: 6px;
/* line-height: 2; */
}


@ -3,6 +3,7 @@ import "./App.css";
import Header from "./Header";
import Intro from "./Intro";
import Record from "./Record";
import Tutorial from "./Tutorial";
import { getUUID, createUUID } from "./api/localstorage";
import { BrowserRouter as Router, Route } from "react-router-dom";
@ -23,6 +24,7 @@ class App extends Component {
<div className="page">
<Route exact path="/" component={Intro} />
<Route path="/record" component={Record} />
<Route path="/tutorial" component={Tutorial} />
</div>
</div>
</Router>


@ -22,7 +22,7 @@ class Intro extends Component {
return (
<div className="App">
<div id="PageIntro">
<h2 style={{ color: "#FD9E66" }}>mimic training studio</h2>
<h2 style={{ color: "#FD9E66" }}>Mimic Recording Studio</h2>
<h1>Help us build the voice(s) of Mycroft!</h1>
<p>
Mycroft's open source Mimic technologies are Text-to-Speech engines,
@ -32,7 +32,7 @@ class Intro extends Component {
sounding like the voice on which it was trained.
</p>
<p>
This website simplifies the collection of training data from
The Mimic Recording Studio simplifies the collection of training data from
individuals, each of which can be used to produce a distinct voice
for Mimic.
</p>
@ -87,11 +87,11 @@ class Intro extends Component {
{getName() ? this.renderWelcomeBackMsg() : this.renderInput()}
<div className="btn_PageIntro">
<button
// to="/record"
id="btn_PageIntro"
className="btn"
onClick={this.handleTrainMimicBtn}
>
<div className="btn">train mimic</div>
Record
</button>
</div>
</div>
@ -102,7 +102,7 @@ class Intro extends Component {
renderInput = () => {
return (
<div>
<p>To get started, enter your name and hit the Train Mimic button.</p>
<p>To get started, enter your name and hit the Record button.</p>
<input
type="text"
id="yourname"


@ -1,6 +1,8 @@
import React, { Component } from "react";
import { ReactMic as Visualizer } from "react-mic";
import Recorder from "./components/Recorder";
import PhraseBox from "./components/PhraseBox";
import Metrics from "./components/Metrics";
import hark from "hark";
import Wave from "./components/Wave";
@ -9,13 +11,7 @@ import spacebarSVG from "./assets/space.svg";
import PSVG from "./assets/P.svg";
import rightSVG from "./assets/right.svg";
import {
postAudio,
getPrompt,
getUser,
createUser,
getAudioLen
} from "./api";
import { postAudio, getPrompt, getUser, createUser, getAudioLen } from "./api";
import { getUUID, getName } from "./api/localstorage";
class Record extends Component {
@ -25,7 +21,7 @@ class Record extends Component {
this.state = {
userCreated: false,
shouldRecord: false,
displayWave: false,
displayWav: false,
blob: undefined,
play: false,
prompt: "...error loading prompt...",
@ -36,8 +32,8 @@ class Record extends Component {
audioLen: 0
};
this.uuid = getUUID()
this.name = getName()
this.uuid = getUUID();
this.name = getName();
}
componentDidMount() {
@ -53,33 +49,25 @@ class Record extends Component {
return (
<div id="PageRecord">
<h1>Mimic Recording Studio</h1>
<TopContainer
userName={this.name}
promptNum={this.state.promptNum}
/>
<TopContainer userName={this.name} promptNum={this.state.promptNum} route={this.props.history.push}/>
<Metrics
totalTime={this.state.totalTime}
totalChar={this.state.totalCharLen}
totalCharLen={this.state.totalCharLen}
promptNum={this.state.promptNum}
totalPrompt={this.state.totalPrompt}
/>
<PhraseBox
prompt={this.state.prompt}
promptNum={this.state.promptNum}
audioLen={this.state.audioLen}
totalCharLen={this.state.totalCharLen}
totalTime={this.state.totalTime}
/>
<div id="phraseBox">
{/* <div className="recordBox">
<p>Click
<img id="record" src={microphoneSVG} alt="" onClick={this.recordHandler} />
and say this phrase:
</p>
</div> */}
<div id="phrase">
{this.renderFeedback()}
{this.state.prompt}
</div>
</div>
<div id="container ">
{this.state.displayWav ? this.renderWave() : this.renderVisualizer()}
<Recorder
command={this.state.shouldRecord ? "start" : "stop"}
onStart={() => this.shouldDisplayWave(false)}
onStart={() => this.shoulddisplayWav(false)}
onStop={this.processBlob}
gotStream={this.silenceDetection}
/>
@ -105,7 +93,8 @@ class Record extends Component {
.then(res => {
if (res.success) {
this.setState({
prompt: res.data.prompt
prompt: res.data.prompt,
totalPrompt: res.data.total_prompt
});
}
});
@ -125,43 +114,28 @@ class Record extends Component {
});
this.requestPrompts(this.uuid);
} else {
if (this.uuid){
if (this.uuid) {
createUser(this.uuid, this.name)
.then(res => res.json())
.then(res => {
if (res.success) {
this.setState({userCreated: true})
this.setState({ userCreated: true });
this.requestPrompts(this.uuid);
} else {
alert("sorry there is in error creating user")
alert("sorry there is in error creating user");
}
})
});
} else {
alert("sorry there is in error creating user")
alert("sorry there is in error creating user");
}
}
});
};
renderFeedback = () => {
if (this.state.promptNum < 10 || this.state.audioLen === 0) {
return <div className="feedback-ball-grey"></div>
}
else {
const speechRate = this.state.prompt.length / this.state.audioLen
const avgSpeechRate = (this.state.totalCharLen / this.state.totalTime).toFixed(1)
if ((avgSpeechRate * 0.9) < speechRate && speechRate < (avgSpeechRate * 1.1)) {
return <div className="feedback-ball-green"></div>
} else {
return <div className="feedback-ball-red"></div>
}
}
}
renderWave = () => (
<Wave
className="wavedisplay"
waveColor={"#FD9E66"}
waveColor="#FD9E66"
blob={this.state.blob}
play={this.state.play}
onFinish={this.stopWav}
@ -180,17 +154,18 @@ class Record extends Component {
processBlob = blob => {
getAudioLen(this.uuid, blob)
.then(res => res.json())
.then(res => this.setState({
audioLen: res.data.audio_len
}))
.then(res =>
this.setState({
audioLen: res.data.audio_len
})
);
this.setState({
blob: blob
});
this.shouldDisplayWave(true);
this.shoulddisplayWav(true);
};
shouldDisplayWave = bool => this.setState({ displayWav: bool });
shoulddisplayWav = bool => this.setState({ displayWav: bool });
playWav = () => this.setState({ play: true });
@ -218,12 +193,14 @@ class Record extends Component {
};
recordHandler = () => {
this.setState((state, props) => {
return {
shouldRecord: !state.shouldRecord,
play: false
};
});
setTimeout(() => {
this.setState((state, props) => {
return {
shouldRecord: !state.shouldRecord,
play: false
};
});
}, 200);
};
onNext = () => {
@ -235,7 +212,10 @@ class Record extends Component {
this.setState({ displayWav: false });
this.requestPrompts(this.uuid);
this.requestUserDetails(this.uuid);
this.setState({ blob: undefined });
this.setState({
blob: undefined,
audioLen: 0
});
} else {
alert("There was an error in saving that audio");
}
@ -245,7 +225,10 @@ class Record extends Component {
};
silenceDetection = stream => {
const options = {};
const options = {
interval: "150",
threshold: -80
};
const speechEvents = hark(stream, options);
speechEvents.on("stopped_speaking", () => {
@ -262,7 +245,7 @@ class TopContainer extends Component {
<div className="top-container">
<div className="instructions2">
<i className="fas fa-info-circle" />
<h2>hints</h2>
<h2>HINTS</h2>
<ul className="hints">
<li>
<img src={spacebarSVG} className="key-icon" alt="space" /> will
@ -282,11 +265,11 @@ class TopContainer extends Component {
<div className="session-info">
<div className="top-info">
<div>
Recorder:&nbsp;
<h2>RECORDER</h2>
&nbsp;
<span id="sessionName">{this.props.userName}</span>
</div>
<div className="btn-restart">
</div>
<div className="btn-restart" />
</div>
<hr />
<p>
@ -297,27 +280,14 @@ class TopContainer extends Component {
. If you accidentally deviate from the script or are unsure, please
record the prompt again.
</p>
<button className="btn" onClick={this.handleClick}>Tutorial</button>
</div>
</div>
);
}
}
class Metrics extends Component {
render() {
return (
<div className="metrics-container">
<div className="total-hours">
<h4>Progress</h4>
<div>Phrase: {this.props.promptNum} / 23320</div>
<div>Time Recorded: {Math.round(this.props.totalTime)} seconds</div>
</div>
<div className="speech-rate">
<h4>Speech Rate</h4>
<div>Average: {(this.props.totalChar / this.props.totalTime).toFixed(1)} characters per seconds</div>
</div>
</div>
);
handleClick = () => {
this.props.route("/tutorial")
}
}


@ -0,0 +1,79 @@
import React, { Component } from "react";
import PhraseBox from "./components/PhraseBox";
import { ReactMic as Visualizer } from "react-mic";
import spacebarSVG from "./assets/space.svg";
import PSVG from "./assets/P.svg";
import rightSVG from "./assets/right.svg";
class Tutorial extends Component {
render() {
return (
<div className="App">
<h1>Tutorial</h1>
<div className="tutorial">
The Mimic Recording Studio was made to simplify the process of
creating your own text-to-speech corpus. This tutorial will help you
get started.
<br />
<br />
<div>
<h2>Recording Box</h2>
<PhraseBox
prompt="The human voice is the most perfect instrument of all."
promptNum={0}
audioLen={0}
totalCharLen={0}
totalTime={0}
/>
<Visualizer
className="wavedisplay"
record={false}
backgroundColor="#222222"
strokeColor="#FD9E66"
/>
<p>
In the middle, you can see the phrase to record. To start the
recording, press the &nbsp;
<img src={spacebarSVG} className="key-icon" alt="space" /> &nbsp;
bar. The recording should automatically stop when it detects
silence. To replay the recording, press the &nbsp;
<img src={PSVG} className="key-icon" alt="p" /> &nbsp;
key. You may re-record
that same phrase as many times as you like. <b>It is essential that the
recorded words match the text in the script exactly. </b> If you
accidentally deviate from the script or are unsure, please record
the prompt again. Once saved you may not be able to go back. Press
the &nbsp;
<img src={rightSVG} className="key-icon" alt="->" />&nbsp;
key to keep the recording and move on to the next phrase.
</p>
</div>
<div>
<h2>Feedback</h2>
<p>
When recording stops, you may notice a feedback indicator appearing in the top right corner of the Recording Box. This indicator will tell you if you are speaking too fast, too slow, or at a good pace.
</p>
<div className="grid-layout">
<div className="feedback-ball-green-t">Good Pace</div>
<div className="feedback-ball-red-t">To Slow</div>
<div className="feedback-ball-red-t">To Fast</div>
</div>
<p><b>The indicator is determined using your average speech rate. The indicator will only start appearing after 100 recorded samples.
</b>
</p>
</div>
<div>
<button className="btn" onClick={this.handleClick}>Record</button>
</div>
</div>
</div>
);
}
handleClick = () => {
this.props.history.push("/record")
}
}
export default Tutorial;


@ -0,0 +1,47 @@
import React, { Component } from "react";
import PropTypes from 'prop-types';
class Metrics extends Component {
render() {
let charPerSec = (this.props.totalCharLen / this.props.totalTime).toFixed(1);
charPerSec = isNaN(charPerSec) ? 0 : charPerSec;
return (
<div className="metrics-container">
<div className="total-hours">
<h2>PROGRESS</h2>
<div>
Phrase: {this.props.promptNum} / {this.props.totalPrompt}
</div>
<div>
Time Recorded: {this.secondsToHms(Math.round(this.props.totalTime))}
</div>
</div>
<div className="speech-rate">
<h2>SPEECH RATE</h2>
<div>Overall Average: {charPerSec} characters per second</div>
</div>
</div>
);
}
secondsToHms = d => {
d = Number(d);
var h = Math.floor(d / 3600);
var m = Math.floor((d % 3600) / 60);
var s = Math.floor((d % 3600) % 60);
var hDisplay = h > 0 ? h + (h == 1 ? " hour, " : " hours, ") : "";
var mDisplay = m > 0 ? m + (m == 1 ? " minute, " : " minutes, ") : "";
var sDisplay = s > 0 ? s + (s == 1 ? " second" : " seconds") : "";
return hDisplay + mDisplay + sDisplay;
};
}
Metrics.propTypes = {
promptNum: PropTypes.number,
totalCharLen: PropTypes.number,
totalTime: PropTypes.number,
totalPrompt: PropTypes.number
}
export default Metrics;


@ -0,0 +1,60 @@
import React, { Component } from "react";
import PropTypes from 'prop-types';
class PhraseBox extends Component {
render() {
return (
<div id="phraseBox">
<div id="phrase">
{this.renderFeedback()}
{this.props.prompt}
</div>
</div>
);
}
renderFeedback = () => {
if (this.props.promptNum < 20 || this.props.audioLen === 0) {
return ""
} else {
const speechRate = this.props.prompt.length / this.props.audioLen;
const avgSpeechRate = (
this.props.totalCharLen / this.props.totalTime
).toFixed(1);
// allow speechRate to deviate from the average within a band that depends on prompt length (see determinePace)
if (this.determinePace(avgSpeechRate, speechRate)) {
return <div className="feedback-ball-green">Good Pace</div>;
} else if (speechRate < avgSpeechRate) {
return <div className="feedback-ball-red">To Slow</div>;
} else {
return <div className="feedback-ball-red">To Fast</div>;
}
}
};
determinePace = (avgSpeechRate, speechRate) => {
if (this.props.prompt.length <= 25) {
return (
avgSpeechRate * 0.5 < speechRate && speechRate < avgSpeechRate * 1.5
);
} else if (this.props.prompt.length <= 125) {
return (
avgSpeechRate * 0.75 < speechRate && speechRate < avgSpeechRate * 1.25
);
} else {
return (
avgSpeechRate * 0.85 < speechRate && speechRate < avgSpeechRate * 1.15
);
}
};
}
PhraseBox.propTypes = {
prompt: PropTypes.string,
promptNum: PropTypes.number,
audioLen: PropTypes.number,
totalCharLen: PropTypes.number,
totalTime: PropTypes.number
}
export default PhraseBox;
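The tolerance band used by `determinePace` widens for short prompts and narrows for long ones. A small re-statement of that check outside React, for illustration only (not code from this repo):

```python
# Re-statement of PhraseBox.determinePace for illustration (not repo code).
def pace_ok(prompt_len: int, speech_rate: float, avg_rate: float) -> bool:
    if prompt_len <= 25:         # very short prompts: +/-50% around the average
        lo, hi = 0.5, 1.5
    elif prompt_len <= 125:      # typical prompts: +/-25%
        lo, hi = 0.75, 1.25
    else:                        # long prompts: +/-15%
        lo, hi = 0.85, 1.15
    return avg_rate * lo < speech_rate < avg_rate * hi

# A 100-character prompt read in 8 seconds (12.5 chars/sec) against an
# average rate of 14 chars/sec lands inside the +/-25% band:
print(pace_ok(100, 100 / 8, 14.0))   # True
```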


@ -1,5 +1,6 @@
import React, { Component } from "react";
import WaveSurfer from "wavesurfer.js";
import PropTypes from 'prop-types';
class Wave extends Component {
componentDidMount() {
@ -36,4 +37,12 @@ class Wave extends Component {
};
}
Wave.propTypes = {
className: PropTypes.string,
waveColor: PropTypes.string,
blob: PropTypes.instanceOf(Blob),
play: PropTypes.bool,
onFinish: PropTypes.func
}
export default Wave;