import com.sap.gateway.ip.core.customdev.util.Message;
import java.util.HashMap;
import groovy.xml.MarkupBuilder ;
/*
 * SAP CPI script step: reads the SuccessFactors PerPerson XML payload
 * from the exchange body and copies the employee's first name and
 * e-mail address into the exchange properties "name" and "email"
 * for use by later flow steps.
 *
 * The message body itself is left untouched.
 */
def Message processData(Message message) {
    def body = message.getBody(java.lang.String) as String
    def root = new XmlParser().parseText(body)

    // GPath navigation into the PerPerson entity. text() already returns
    // a String, so no GString wrapping or toString() is needed. If a node
    // is absent, text() yields "" rather than throwing.
    String name  = root.PerPerson.personalInfoNav.PerPersonal.firstName.text()
    String email = root.PerPerson.emailNav.PerEmail.emailAddress.text()

    message.setProperty("name", name)
    message.setProperty("email", email)
    return message
}
// Bridge object consumed by the SAP CAI WebClient. Every stt* hook is
// forwarded dynamically to window.webclientBridgeImpl, so the concrete
// speech implementation can be registered (or swapped) at any time.
const webclientBridge = {
  callImplMethod: async (name, ...args) => {
    console.log(name)
    const impl = window.webclientBridgeImpl
    if (impl && impl[name]) {
      return impl[name](...args)
    }
  },
  // If this function returns an object, WebClient enables the microphone button.
  sttGetConfig: async (...args) => webclientBridge.callImplMethod('sttGetConfig', ...args),
  sttStartListening: async (...args) => webclientBridge.callImplMethod('sttStartListening', ...args),
  sttStopListening: async (...args) => webclientBridge.callImplMethod('sttStopListening', ...args),
  sttAbort: async (...args) => webclientBridge.callImplMethod('sttAbort', ...args),
  // Only called if useMediaRecorder = true in sttGetConfig.
  sttOnFinalAudioData: async (...args) => webclientBridge.callImplMethod('sttOnFinalAudioData', ...args),
  // Only called if useMediaRecorder = true in sttGetConfig;
  // forwards each interim audio blob to the STT service.
  sttOnInterimAudioData: async (...args) => webclientBridge.callImplMethod('sttOnInterimAudioData', ...args),
}
window.sapcai = {
  webclientBridge,
}
// Handles working with browser speech recognition API
// Thin wrapper around the browser's webkitSpeechRecognition API.
//
// Parameters:
//   onFinalised    — callback(text): receives each finalised transcript.
//   onEndEvent     — callback(): fired when recognition ends.
//   onAnythingSaid — optional callback(text): receives interim transcripts;
//                    interim results are only requested when it is provided.
//   language       — recognition language tag, defaults to 'en-US'.
//
// Throws Error when the browser has no webkitSpeechRecognition support.
class SpeechToText {
  constructor(onFinalised, onEndEvent, onAnythingSaid, language = 'en-US') {
    if (!('webkitSpeechRecognition' in window)) {
      throw new Error("This browser doesn't support speech recognition. Try Google Chrome.")
    }
    const SpeechRecognition = window.webkitSpeechRecognition
    this.recognition = new SpeechRecognition()
    // Only request interim results when a callback for them was passed in.
    this.recognition.interimResults = !!onAnythingSaid
    this.recognition.lang = language

    let finalTranscript = ''
    // Process both interim and finalised results.
    this.recognition.onresult = (event) => {
      let interimTranscript = ''
      // Concatenate all transcribed pieces (SpeechRecognitionResult).
      for (let i = event.resultIndex; i < event.results.length; i += 1) {
        const transcriptionPiece = event.results[i][0].transcript
        if (event.results[i].isFinal) {
          // Finalised transcription from the cloud: deliver it and reset
          // the accumulator for the next utterance.
          finalTranscript += transcriptionPiece
          onFinalised(finalTranscript)
          finalTranscript = ''
        } else if (this.recognition.interimResults) {
          interimTranscript += transcriptionPiece
          onAnythingSaid(interimTranscript)
        }
      }
    }
    this.recognition.onend = () => {
      onEndEvent()
    }
    this.startListening = () => {
      this.recognition.start()
    }
    this.stopListening = () => {
      this.recognition.stop()
    }
  }
}
// Contains callbacks for when results are returned
// Adapts SpeechToText results to the SAP CAI WebClient: each callback
// packages the transcript as { text, final } and hands it to
// window.sap.cai.webclient.onSTTResult.
class STTSpeechAPI {
  constructor(language = 'en-US') {
    this.stt = new SpeechToText(this.onFinalResult, this.onStop, this.onInterimResult, language)
  }

  startListening() {
    this.stt.startListening()
  }

  stopListening() {
    this.stt.stopListening()
  }

  abort() {
    this.stt.recognition.abort()
    this.stt.stopListening()
  }

  // Deliver a finalised transcript to the WebClient.
  onFinalResult(text) {
    window.sap.cai.webclient.onSTTResult({ text, final: true })
  }

  // Deliver an interim (not yet finalised) transcript to the WebClient.
  onInterimResult(text) {
    window.sap.cai.webclient.onSTTResult({ text, final: false })
  }

  // Signal end-of-recognition with an empty final result.
  onStop() {
    window.sap.cai.webclient.onSTTResult({ text: '', final: true })
  }
}
// Contains methods SAP Conversational AI needs for handling
// chatbot UI events
// Speech-to-text implementation registered on window.webclientBridgeImpl
// for the WebClient bridge above. `stt` holds the active recognition
// session, or null when none has been started yet.
let stt = null
const sttSpeech = {
  // useMediaRecorder: false → the bridge relies on the browser's own
  // speech recognition instead of streaming recorded audio blobs.
  sttGetConfig: async () => {
    return {
      useMediaRecorder: false,
    }
  },
  // params is an array whose first entry is a metadata object; only its
  // `language` field is used here.
  sttStartListening: async (params) => {
    const [metadata] = params
    const { language } = metadata
    stt = new STTSpeechAPI(language)
    stt.startListening()
  },
  sttStopListening: () => {
    // Guard: stop/abort may arrive before any session was ever started;
    // without the check this would throw on a null `stt`.
    if (stt) {
      stt.stopListening()
    }
  },
  sttAbort: () => {
    if (stt) {
      stt.abort()
    }
  },
}
window.webclientBridgeImpl = sttSpeech
You must be a registered user to add a comment. If you've already registered, sign in. Otherwise, register and sign in.
User | Count |
---|---|
16 | |
12 | |
9 | |
7 | |
7 | |
6 | |
6 | |
5 | |
5 | |
5 |