[javasound/dialogprocessor] Not share mic ref on javasound + close audio streams and use RecognitionStartEvent on dialogprocessor (#2732)
* [javasound] not share targetdataline

Signed-off-by: Miguel Álvarez Díez <miguelwork92@gmail.com>

parent 75252cf4aa
commit 9b438d7e12
@@ -12,11 +12,11 @@
 */
package org.openhab.core.audio.internal.javasound;

import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;

import org.eclipse.jdt.annotation.NonNullByDefault;
@@ -32,6 +32,7 @@ import org.osgi.service.component.annotations.Component;
 *
 * @author Kelly Davis - Initial contribution and API
 * @author Kai Kreuzer - Refactored and stabilized
 * @author Miguel Álvarez - Share microphone line only under Windows OS
 *
 */
@NonNullByDefault
@@ -50,10 +51,20 @@ public class JavaSoundAudioSource implements AudioSource {
    private final AudioFormat audioFormat = convertAudioFormat(format);

    /**
     * TargetDataLine for the mic
     * Running on Windows OS
     */
    private final boolean windowsOS = System.getProperty("os.name", "Unknown").startsWith("Win");

    /**
     * TargetDataLine for sharing the mic on Windows OS due to limitations
     */
    private @Nullable TargetDataLine microphone;

    /**
     * Set for control microphone close on Windows OS
     */
    private final Set<Object> openStreamRefs = new HashSet<>();

    /**
     * Constructs a JavaSoundAudioSource
     */
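
Not part of the diff itself — a minimal, self-contained sketch of how the windowsOS flag added above is derived from the os.name system property (the class name is hypothetical):

public class OsNameSketch {
    public static void main(String[] args) {
        // "os.name" is e.g. "Windows 10", "Linux" or "Mac OS X"; "Unknown" is only a fallback default
        String osName = System.getProperty("os.name", "Unknown");
        boolean windowsOS = osName.startsWith("Win");
        System.out.println(osName + " -> share a single TargetDataLine: " + windowsOS);
    }
}
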
@@ -63,16 +74,10 @@ public class JavaSoundAudioSource implements AudioSource {
    private TargetDataLine initMicrophone(javax.sound.sampled.AudioFormat format) throws AudioException {
        try {
            TargetDataLine microphone = AudioSystem.getTargetDataLine(format);

            DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
            microphone = (TargetDataLine) AudioSystem.getLine(info);

            microphone.open(format);

            this.microphone = microphone;
            return microphone;
        } catch (Exception e) {
            throw new AudioException("Error creating the audio input stream.", e);
            throw new AudioException("Error creating the audio input stream: " + e.getMessage(), e);
        }
    }
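
Not part of the commit — a sketch of the javax.sound.sampled calls the new initMicrophone relies on, assuming a 16 kHz, 16-bit, signed, mono, little-endian PCM format (the concrete format and class name are illustrative only):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

public class TargetDataLineSketch {
    public static void main(String[] args) throws LineUnavailableException {
        AudioFormat format = new AudioFormat(16000.0f, 16, 1, true, false);
        // getTargetDataLine(format) replaces the older DataLine.Info + AudioSystem.getLine(info) pair
        TargetDataLine line = AudioSystem.getTargetDataLine(format);
        line.open(format);
        line.start();
        try {
            byte[] buffer = new byte[4096];
            int read = line.read(buffer, 0, buffer.length); // blocks until a buffer of audio is captured
            System.out.println("Captured " + read + " bytes from the default microphone");
        } finally {
            line.close(); // releases the microphone for other consumers
        }
    }
}
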
@@ -81,11 +86,30 @@ public class JavaSoundAudioSource implements AudioSource {
        if (!expectedFormat.isCompatible(audioFormat)) {
            throw new AudioException("Cannot produce streams in format " + expectedFormat);
        }
        TargetDataLine mic = this.microphone;
        if (mic == null) {
            mic = initMicrophone(format);
        // on OSs other than windows we can open multiple lines for the microphone
        if (!windowsOS) {
            return new JavaSoundInputStream(initMicrophone(format), audioFormat);
        }
        return new JavaSoundInputStream(mic, audioFormat);
        // on Windows OS we share the microphone line
        var ref = new Object();
        TargetDataLine microphoneDataLine;
        synchronized (openStreamRefs) {
            microphoneDataLine = this.microphone;
            if (microphoneDataLine == null) {
                microphoneDataLine = initMicrophone(format);
                this.microphone = microphoneDataLine;
            }
            openStreamRefs.add(ref);
        }
        return new JavaSoundInputStream(microphoneDataLine, audioFormat, () -> {
            synchronized (openStreamRefs) {
                var microphone = this.microphone;
                if (openStreamRefs.remove(ref) && openStreamRefs.isEmpty() && microphone != null) {
                    microphone.close();
                    this.microphone = null;
                }
            }
        });
    }

    @Override
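
Not part of the diff — the getInputStream change above keeps a single shared line open until the last stream backed by it is closed. A stripped-down sketch of that reference-counting scheme (SharedMicrophone and its members are hypothetical, not openHAB API):

import java.util.HashSet;
import java.util.Set;

public class SharedMicrophone {

    private final Set<Object> openStreamRefs = new HashSet<>();
    private boolean lineOpen = false;

    /** Registers a consumer and returns a handle to run when that consumer is done. */
    public Runnable acquire() {
        Object ref = new Object();
        synchronized (openStreamRefs) {
            if (openStreamRefs.isEmpty()) {
                lineOpen = true; // first consumer: open the underlying line once
            }
            openStreamRefs.add(ref);
        }
        return () -> release(ref);
    }

    private void release(Object ref) {
        synchronized (openStreamRefs) {
            if (openStreamRefs.remove(ref) && openStreamRefs.isEmpty()) {
                lineOpen = false; // last consumer gone: close the underlying line
            }
        }
    }

    public boolean isLineOpen() {
        synchronized (openStreamRefs) {
            return lineOpen;
        }
    }
}

In the diff, each JavaSoundInputStream created on Windows corresponds to one ref object, and its close handler plays the role of the Runnable returned here.
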
@@ -35,6 +35,7 @@ public class JavaSoundInputStream extends AudioStream {
     */
    private final TargetDataLine input;
    private final AudioFormat format;
    private final @Nullable JavaSoundInputStreamCloseHandler closeHandler;

    /**
     * Constructs a JavaSoundInputStream with the passed input
@@ -42,7 +43,18 @@ public class JavaSoundInputStream extends AudioStream {
     * @param input The mic which data is pulled from
     */
    public JavaSoundInputStream(TargetDataLine input, AudioFormat format) {
        this(input, format, null);
    }

    /**
     * Constructs a JavaSoundInputStream with the passed input and a close handler.
     *
     * @param input The mic which data is pulled from
     */
    public JavaSoundInputStream(TargetDataLine input, AudioFormat format,
            @Nullable JavaSoundInputStreamCloseHandler closeHandler) {
        this.format = format;
        this.closeHandler = closeHandler;
        this.input = input;
        this.input.start();
    }
@@ -74,11 +86,20 @@ public class JavaSoundInputStream extends AudioStream {

    @Override
    public void close() throws IOException {
        input.close();
        var closeHandler = this.closeHandler;
        if (closeHandler != null) {
            closeHandler.onStreamClosed();
        } else {
            input.close();
        }
    }

    @Override
    public AudioFormat getFormat() {
        return format;
    }

    public interface JavaSoundInputStreamCloseHandler {
        void onStreamClosed();
    }
}
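
Not part of the diff — a compact, generic sketch of the pattern JavaSoundInputStream now follows: a single-method close handler (so the caller can pass a lambda), a short constructor delegating with null, and a close() that either notifies the handler or closes the wrapped resource itself (WrappedStream and its names are hypothetical):

import java.io.Closeable;
import java.io.IOException;

public class WrappedStream implements Closeable {

    /** Single-method interface, so it can be implemented with a lambda as in the Windows path above. */
    public interface CloseHandler {
        void onStreamClosed();
    }

    private final Closeable source;
    private final CloseHandler closeHandler; // null means "own the source and close it directly"

    public WrappedStream(Closeable source) {
        this(source, null);
    }

    public WrappedStream(Closeable source, CloseHandler closeHandler) {
        this.source = source;
        this.closeHandler = closeHandler;
    }

    @Override
    public void close() throws IOException {
        if (closeHandler != null) {
            closeHandler.onStreamClosed(); // shared source: let the owner decide when to really close it
        } else {
            source.close(); // exclusive source: close it here
        }
    }
}
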
@@ -14,6 +14,7 @@ package org.openhab.core.voice.internal;

import static org.openhab.core.voice.internal.VoiceManagerImpl.getBestMatch;

import java.io.IOException;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
@@ -39,6 +40,7 @@ import org.openhab.core.voice.KSListener;
import org.openhab.core.voice.KSService;
import org.openhab.core.voice.KSServiceHandle;
import org.openhab.core.voice.KSpottedEvent;
import org.openhab.core.voice.RecognitionStartEvent;
import org.openhab.core.voice.RecognitionStopEvent;
import org.openhab.core.voice.STTEvent;
import org.openhab.core.voice.STTException;
@@ -65,6 +67,7 @@ import org.slf4j.LoggerFactory;
 * @author Christoph Weitkamp - Added getSupportedStreams() and UnsupportedAudioStreamException
 * @author Christoph Weitkamp - Added parameter to adjust the volume
 * @author Laurent Garnier - Added stop() + null annotations + resources releasing
 * @author Miguel Álvarez - Close audio streams + use RecognitionStartEvent
 */
@NonNullByDefault
public class DialogProcessor implements KSListener, STTListener {
@@ -134,7 +137,7 @@ public class DialogProcessor implements KSListener, STTListener {
            streamKS = stream;
            ksServiceHandle = ks.spot(this, stream, locale, keyword);
        } catch (AudioException e) {
            logger.warn("Error creating the audio stream: {}", e.getMessage());
            logger.warn("Encountered audio error: {}", e.getMessage());
        } catch (KSException e) {
            logger.warn("Encountered error calling spot: {}", e.getMessage());
            closeStreamKS();
@@ -160,8 +163,11 @@ public class DialogProcessor implements KSListener, STTListener {
    private void closeStreamKS() {
        AudioStream stream = streamKS;
        if (stream != null) {
            // Due to an issue in JavaSoundAudioSource ( https://github.com/openhab/openhab-core/issues/2702 )
            // we do not try closing the stream
            try {
                stream.close();
            } catch (IOException e) {
                logger.debug("IOException closing ks audio stream: {}", e.getMessage(), e);
            }
            streamKS = null;
        }
    }
@@ -178,8 +184,11 @@ public class DialogProcessor implements KSListener, STTListener {
    private void closeStreamSTT() {
        AudioStream stream = streamSTT;
        if (stream != null) {
            // Due to an issue in JavaSoundAudioSource ( https://github.com/openhab/openhab-core/issues/2702 )
            // we do not try closing the stream
            try {
                stream.close();
            } catch (IOException e) {
                logger.debug("IOException closing stt audio stream: {}", e.getMessage(), e);
            }
            streamSTT = null;
        }
    }
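
Not part of the diff — closeStreamKS and closeStreamSTT above now actually close their streams and demote an IOException to a debug log entry. A generic helper capturing that shape (closeQuietly and the class name are illustrative; SLF4J is used because the surrounding code already does):

import java.io.Closeable;
import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class StreamCloser {

    private static final Logger logger = LoggerFactory.getLogger(StreamCloser.class);

    private StreamCloser() {
    }

    /** Closes the given stream, logging any IOException at debug level instead of propagating it. */
    public static void closeQuietly(Closeable stream, String label) {
        if (stream == null) {
            return;
        }
        try {
            stream.close();
        } catch (IOException e) {
            logger.debug("IOException closing {} audio stream: {}", label, e.getMessage(), e);
        }
    }
}
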
@@ -201,7 +210,6 @@ public class DialogProcessor implements KSListener, STTListener {
        if (!processing) {
            isSTTServerAborting = false;
            if (ksEvent instanceof KSpottedEvent) {
                toggleProcessing(true);
                abortSTT();
                closeStreamSTT();
                isSTTServerAborting = false;
@@ -213,10 +221,8 @@ public class DialogProcessor implements KSListener, STTListener {
                sttServiceHandle = stt.recognize(this, stream, locale, new HashSet<>());
            } catch (AudioException e) {
                logger.warn("Error creating the audio stream: {}", e.getMessage());
                toggleProcessing(false);
            } catch (STTException e) {
                closeStreamSTT();
                toggleProcessing(false);
                String msg = e.getMessage();
                String text = i18nProvider.getText(bundle, "error.stt-exception", null, locale);
                if (msg != null) {
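
Not part of the diff — a generic sketch (hypothetical names) of the STTException path above: release the audio stream, reset the processing flag, and keep both the localized text and the raw exception message available for the user-facing error (how the two are combined is cut off in the hunk, so this sketch only reports both):

import java.io.Closeable;
import java.io.IOException;

public class RecognitionErrorHandling {

    private boolean processing = true;

    void onRecognitionFailure(Closeable sttStream, Exception e, String localizedText) {
        try {
            sttStream.close(); // mirrors closeStreamSTT()
        } catch (IOException ignored) {
            // a failed close must not mask the original recognition error
        }
        processing = false; // mirrors toggleProcessing(false)
        String msg = e.getMessage();
        System.out.println("STT error reported to the user: " + localizedText + " (" + msg + ")");
    }
}
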
@@ -254,6 +260,8 @@ public class DialogProcessor implements KSListener, STTListener {
                    }
                }
            }
        } else if (sttEvent instanceof RecognitionStartEvent) {
            toggleProcessing(true);
        } else if (sttEvent instanceof RecognitionStopEvent) {
            toggleProcessing(false);
        } else if (sttEvent instanceof SpeechRecognitionErrorEvent) {
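
Not part of the diff — with this change the processing flag is driven by the recognition lifecycle events rather than by the keyword-spot event alone. A self-contained sketch of that instanceof-based dispatch, using hypothetical event marker types instead of the openHAB STTEvent hierarchy (the SpeechRecognitionErrorEvent branch is cut off above, so it is omitted here):

public class EventToggleSketch {

    interface SttEvent {
    }

    static class RecognitionStart implements SttEvent {
    }

    static class RecognitionStop implements SttEvent {
    }

    private boolean processing = false;

    void onEvent(SttEvent event) {
        if (event instanceof RecognitionStart) {
            processing = true; // recognition really started: block concurrent dialog processing
        } else if (event instanceof RecognitionStop) {
            processing = false; // recognition finished: the next keyword spot may start a new dialog
        }
        System.out.println(event.getClass().getSimpleName() + " -> processing=" + processing);
    }

    public static void main(String[] args) {
        EventToggleSketch sketch = new EventToggleSketch();
        sketch.onEvent(new RecognitionStart());
        sketch.onEvent(new RecognitionStop());
    }
}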