Coverage summary for src/app/touchpad/speech-processing/speech-providers.ts

Statements: 51.85% (28/54)
Branches: 100% (2/2)
Functions: 46.67% (7/15)
Lines: 53.85% (28/52)

import { Language, VoiceCommand, languageIds } from './utils';
 
/**
 * The interface implemented by speech providers in the application.
 * A SpeechProvider is an object that provides speech synthesis
 * and speech recognition functionality.
 */
export interface SpeechProvider {
  language: Language;
  speechRecognitionSupported(): boolean;
  say(text: string): void;
  addCommand(voiceCommand: VoiceCommand): void;
  removeAllCommands(): void;
}
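
// Illustrative usage sketch (not part of the original file): how a consumer of a
// SpeechProvider might register a command and speak a confirmation. The exact shape
// of VoiceCommand ({ commands: string[]; callback: (param: string) => void }) is
// inferred from addCommand() and processCommand() below, so treat it as an
// assumption rather than the documented API.
//
//   declare const provider: SpeechProvider;
//
//   if (provider.speechRecognitionSupported()) {
//     provider.addCommand({
//       commands: ['scroll down'],
//       callback: (param) => console.log('scroll down, param:', param),
//     });
//     provider.say('Voice commands are ready');
//   }
//
//   // Later, e.g. when the owning component is destroyed:
//   provider.removeAllCommands();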
 
interface AppWindow extends Window {
  [key: string]: any;
}
const appWindow = window as AppWindow;
 
// Prefer the unprefixed constructor (for future support in browsers such as Firefox),
// falling back to Chrome's prefixed webkitSpeechRecognition.
const SpeechRecognition = appWindow['SpeechRecognition'] || appWindow['webkitSpeechRecognition'];
 
/**
 * A SpeechProvider that uses the Web Speech API to provide speech processing functionality.
 */
export class WebSpeechProvider implements SpeechProvider {
 
  private _language: Language;
 
  private speechSynthesis = window.speechSynthesis;
  private pitch = 1;
  private rate = 1;
 
  private speechRecognition: SpeechRecognition;
  private voiceCommands: VoiceCommand[] = [];
  private currentCommand = '';
 
  constructor(defaultLanguage: Language) {
    if (SpeechRecognition) {
      this.speechRecognition = new SpeechRecognition();
      this.speechRecognition.continuous = true;
 
      this.speechRecognition.onresult = (event: SpeechRecognitionEvent) => this.processCommand(event);
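      // Browser SpeechRecognition implementations stop listening after silence or
      // errors; while continuous listening is desired, restart it whenever it ends.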
      this.speechRecognition.onend = _ => {
        if (this.speechRecognition.continuous) {
          this.speechRecognition.start();
        }
      };
      this.speechRecognition.onspeechend = _ => {
        console.log('Speech stopped being recognised...');
      };
 
      this.speechRecognition.start();
    } else {
      console.log('SpeechRecognition unavailable in this browser.');
    }
 
    this.language = defaultLanguage;
  }
 
  get language() {
    return this._language;
  }
 
  set language(language: Language) {
    this._language = language;
    if (this.speechRecognition) {
      this.speechRecognition.lang = languageIds[language];
    }
  }
 
  /**
   * Use speech synthesis to say some input text.
   *
   * @param text The text to say.
   */
  say(text: string) {
    // Interrupt any current speech synthesis.
    this.speechSynthesis.cancel();
 
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = languageIds[this.language];
    utterance.pitch = this.pitch;
    utterance.rate = this.rate;
    this.speechSynthesis.speak(utterance);
  }
 
  /**
   * Determine if speech recognition is supported in the browser being used.
   *
   * @returns A boolean value indicating if speech recognition is supported.
   */
  speechRecognitionSupported() {
    return !!this.speechRecognition;
  }
 
  /**
   * Add a voice command for the speech recogniser to match.
   *
   * @param voiceCommand The voice command to recognise.
   */
  addCommand(voiceCommand: VoiceCommand) {
    this.voiceCommands.push(voiceCommand);
  }
 
  /**
   * Remove all voice commands from the speech provider.
   */
  removeAllCommands() {
    this.voiceCommands = [];
  }
 
  /**
   * Process a SpeechRecognitionEvent triggered by SpeechRecognition:
   * check if the recognised text matches any of the SpeechProvider's commands
   * and execute the associated callback if so.
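   * When several registered commands match, the longest matching command wins.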
   *
   * @param event A SpeechRecognitionEvent triggered by SpeechRecognition.
   */
  private processCommand(event: SpeechRecognitionEvent) {
    for (let i = event.resultIndex; i < event.results.length; i++) {
      if (event.results[i].isFinal) {
        this.currentCommand += event.results[i][0].transcript;
      }
    }
 
    this.currentCommand = this.currentCommand.toLowerCase().trim();
    console.log(this.currentCommand);
 
    let bestMatch = '';
    let callback: (param: string) => void = (param: string) => {};
    let bestMatchLength = 0;
    this.voiceCommands.forEach(
      (voiceCommand) => {
        voiceCommand.commands.forEach(
          (command) => {
            if (this.currentCommand.indexOf(command) > -1) {
              if (command.length > bestMatchLength) {
                bestMatch = command;
                bestMatchLength = command.length;
                callback = voiceCommand.callback;
              }
            }
          }
        );
      }
    );
 
    if (bestMatch) {
      // If the current command has no parameters, call it without any.
      if (this.currentCommand[bestMatchLength] === undefined) {
        callback('');
 
      // If there are parameters, parse them and pass them to the callback.
      // It is up to the callback to decide to use them or not.
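      // For example, with the registered command 'scroll down' and the recognised
      // text 'scroll down 5', the callback is called with the parameter '5'.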
      } else if (this.currentCommand[bestMatchLength] === ' ') {
        const param = this.currentCommand.substring(bestMatchLength + 1, this.currentCommand.length);
        callback(param);
      }
    }
    this.currentCommand = '';
  }
 
}
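
// Minimal instantiation sketch (illustrative, not part of the original file). The
// concrete Language values are defined in './utils' and are not shown here, so a
// placeholder declaration stands in for a real language value.
//
//   declare const defaultLanguage: Language;
//
//   const provider: SpeechProvider = new WebSpeechProvider(defaultLanguage);
//   provider.say('Speech provider initialised');
//   console.log('Recognition supported:', provider.speechRecognitionSupported());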