Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

initial changes to allow sample js to work with ESM v1.33.1 #2132

Merged
merged 6 commits into from
Nov 9, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 53 additions & 57 deletions quickstart/javascript/node/conversation-transcription/index.js
Original file line number Diff line number Diff line change
@@ -1,60 +1,56 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

(function () {
    "use strict";

    // Load the Speech SDK and Node's file-system module.
    const speechSdk = require("microsoft-cognitiveservices-speech-sdk");
    const fileSystem = require("fs");

    // Fill in your own subscription key, service region (e.g., "centralus"),
    // and the name of the audio file to run through the conversation transcriber.
    const subscriptionKey = "YourSubscriptionKey";
    const serviceRegion = "YourServiceRegion"; // e.g., "centralus"
    const filename = "YourAudioFile.wav";

    // Push stream that feeds the audio bytes into the Speech SDK.
    const pushStream = speechSdk.AudioInputStream.createPushStream();

    // Stream the wav file into the push stream, closing it at end-of-file.
    fileSystem.createReadStream(filename)
        .on('data', (arrayBuffer) => {
            pushStream.write(arrayBuffer.slice());
        })
        .on('end', () => {
            pushStream.close();
        });

    // Setup is done; announce the input file.
    console.log("Transcribing from: " + filename);

    // Speech config carries the credentials; audio config wraps our stream.
    const speechConfig = speechSdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
    const audioConfig = speechSdk.AudioConfig.fromStreamInput(pushStream);

    // Conversation transcriber bound to that audio source.
    const transcriber = new speechSdk.ConversationTranscriber(speechConfig, audioConfig);

    // Wire up lifecycle and result callbacks.
    transcriber.sessionStarted = (s, e) => {
        console.log("(sessionStarted) SessionId:" + e.sessionId);
    };
    transcriber.sessionStopped = (s, e) => {
        console.log("(sessionStopped) SessionId:" + e.sessionId);
    };
    transcriber.canceled = (s, e) => {
        console.log("(canceled) " + e.errorDetails);
    };
    transcriber.transcribed = (s, e) => {
        console.log("(transcribed) text: " + e.result.text);
        console.log("(transcribed) speakerId: " + e.result.speakerId);
    };

    // Begin conversation transcription.
    transcriber.startTranscribingAsync(
        () => {},
        (err) => {
            console.trace("err - starting transcription: " + err);
        }
    );
}());
// Load the Speech SDK and Node's file-system module (ES-module style).
import * as speechSdk from "microsoft-cognitiveservices-speech-sdk";
import * as fileSystem from "fs";

// Fill in your own subscription key, service region (e.g., "centralus"),
// and the name of the audio file to run through the conversation transcriber.
const subscriptionKey = "YourSubscriptionKey";
const serviceRegion = "YourServiceRegion"; // e.g., "centralus"
const filename = "YourAudioFile.wav";

// Push stream that feeds the audio bytes into the Speech SDK.
const pushStream = speechSdk.AudioInputStream.createPushStream();

// Stream the wav file into the push stream, closing it at end-of-file.
fileSystem.createReadStream(filename)
    .on('data', (arrayBuffer) => {
        pushStream.write(arrayBuffer.slice());
    })
    .on('end', () => {
        pushStream.close();
    });

// Setup is done; announce the input file.
console.log("Transcribing from: " + filename);

// Speech config carries the credentials; audio config wraps our stream.
const speechConfig = speechSdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
const audioConfig = speechSdk.AudioConfig.fromStreamInput(pushStream);

// Conversation transcriber bound to that audio source.
const transcriber = new speechSdk.ConversationTranscriber(speechConfig, audioConfig);

// Wire up lifecycle and result callbacks.
transcriber.sessionStarted = (s, e) => {
    console.log("(sessionStarted) SessionId:" + e.sessionId);
};
transcriber.sessionStopped = (s, e) => {
    console.log("(sessionStopped) SessionId:" + e.sessionId);
};
transcriber.canceled = (s, e) => {
    console.log("(canceled) " + e.errorDetails);
};
transcriber.transcribed = (s, e) => {
    console.log("(transcribed) text: " + e.result.text);
    console.log("(transcribed) speakerId: " + e.result.speakerId);
};

// Begin conversation transcription.
transcriber.startTranscribingAsync(
    () => {},
    (err) => {
        console.trace("err - starting transcription: " + err);
    }
);
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,14 @@
"version": "1.0.0",
"description": "Quickstart for the Microsoft Speech SDK on Node.js",
"main": "index.js",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Microsoft",
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
"microsoft-cognitiveservices-speech-sdk": "^1.33.0"
"microsoft-cognitiveservices-speech-sdk": "^1.33.1"
}
}
7 changes: 2 additions & 5 deletions quickstart/javascript/node/from-file/index.js
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

(function() {
// <code>
"use strict";

// pull in the required packages.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");
import * as sdk from "microsoft-cognitiveservices-speech-sdk";
import * as fs from "fs";

// replace with your own subscription key,
// service region (e.g., "westus"), and
Expand Down Expand Up @@ -57,5 +55,4 @@
});
// </code>

}());

3 changes: 2 additions & 1 deletion quickstart/javascript/node/from-file/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,14 @@
"version": "1.0.0",
"description": "Quickstart for the Microsoft Speech SDK on Node.js",
"main": "index.js",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Microsoft",
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
"microsoft-cognitiveservices-speech-sdk": "^1.33.0"
"microsoft-cognitiveservices-speech-sdk": "^1.33.1"
}
}
163 changes: 79 additions & 84 deletions quickstart/javascript/node/meeting-transcription/index.js
Original file line number Diff line number Diff line change
@@ -1,99 +1,94 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

(function() {
"use strict";

// pull in the required packages.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");

// replace with your own subscription key,
// service region (e.g., "centralus"), and
// the name of the file you want to transcribe
// through the meeting transcriber.
var subscriptionKey = "YourSubscriptionKey";
var serviceRegion = "YourServiceRegion"; // e.g., "centralus"
var filename = "YourAudioFile.wav"; // 8-channel audio

// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream(sdk.AudioStreamFormat.getWaveFormatPCM(16000, 16, 8))

// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function(arrayBuffer) {
pushStream.write(arrayBuffer.slice());
}).on('end', function() {
pushStream.close();
});

// we are done with the setup
console.log("Transcribing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var speechTranslationConfig = sdk.SpeechTranslationConfig.fromSubscription(subscriptionKey, serviceRegion);
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
import * as sdk from "microsoft-cognitiveservices-speech-sdk";
import * as fs from "fs";

// replace with your own subscription key,
// service region (e.g., "centralus"), and
// the name of the file you want to transcribe
// through the meeting transcriber.
var subscriptionKey = "YourSubscriptionKey";
var serviceRegion = "YourServiceRegion"; // e.g., "centralus"
var filename = "YourAudioFile.wav"; // 8-channel audio

// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream(sdk.AudioStreamFormat.getWaveFormatPCM(16000, 16, 8))

// setting the recognition language to English.
speechTranslationConfig.speechRecognitionLanguage = "en-US";
// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function(arrayBuffer) {
pushStream.write(arrayBuffer.slice());
}).on('end', function() {
pushStream.close();
});

// create the meeting object tracking participants
var meeting = sdk.Meeting.createMeetingAsync(speechTranslationConfig, "myMeeting");
// we are done with the setup
console.log("Transcribing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var speechTranslationConfig = sdk.SpeechTranslationConfig.fromSubscription(subscriptionKey, serviceRegion);
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

// create the meeting transcriber.
var transcriber = new sdk.MeetingTranscriber(audioConfig);
// setting the recognition language to English.
speechTranslationConfig.speechRecognitionLanguage = "en-US";

// attach the transcriber to the meeting
transcriber.joinMeetingAsync(meeting,
function () {
// add first participant with voice signature from enrollment step
var voiceSignatureUser1 = "{" +
"Version: 0," +
"Tag: \"<<VOICE_TAG_HERE>>\"," +
"Data: \"<<VOICE_DATA_HERE>>\"" +
"}";
var user1 = sdk.Participant.From("user1@example.com", "en-us", voiceSignatureUser1);
meeting.addParticipantAsync(user1,
function () {
// add second participant with voice signature from enrollment step
var voiceSignatureUser2 = "{" +
"Version: 0," +
"Tag: \"<<VOICE_TAG_HERE>>\"," +
"Data: \"<<VOICE_DATA_HERE>>\"" +
"}";
var user2 = sdk.Participant.From("user2@example.com", "en-us", voiceSignatureUser2);
meeting.addParticipantAsync(user2,
function () {
transcriber.sessionStarted = function(s, e) {
console.log("(sessionStarted)");
};
transcriber.sessionStopped = function(s, e) {
console.log("(sessionStopped)");
};
transcriber.canceled = function(s, e) {
console.log("(canceled)");
};
transcriber.transcribed = function(s, e) {
console.log("(transcribed) text: " + e.result.text);
console.log("(transcribed) speakerId: " + e.result.speakerId);
};
// create the meeting object tracking participants
var meeting = sdk.Meeting.createMeetingAsync(speechTranslationConfig, "myMeeting");

// Begin meeting transcription
transcriber.startTranscribingAsync(
function () { },
function (err) {
console.trace("err - starting transcription: " + err);
});
},
function (err) {
console.trace("err - adding user1: " + err);
});
// create the meeting transcriber.
var transcriber = new sdk.MeetingTranscriber(audioConfig);

// attach the transcriber to the meeting
transcriber.joinMeetingAsync(meeting,
function () {
// add first participant with voice signature from enrollment step
var voiceSignatureUser1 = "{" +
"Version: 0," +
"Tag: \"<<VOICE_TAG_HERE>>\"," +
"Data: \"<<VOICE_DATA_HERE>>\"" +
"}";
var user1 = sdk.Participant.From("user1@example.com", "en-us", voiceSignatureUser1);
meeting.addParticipantAsync(user1,
function () {
// add second participant with voice signature from enrollment step
var voiceSignatureUser2 = "{" +
"Version: 0," +
"Tag: \"<<VOICE_TAG_HERE>>\"," +
"Data: \"<<VOICE_DATA_HERE>>\"" +
"}";
var user2 = sdk.Participant.From("user2@example.com", "en-us", voiceSignatureUser2);
meeting.addParticipantAsync(user2,
function () {
transcriber.sessionStarted = function(s, e) {
console.log("(sessionStarted)");
};
transcriber.sessionStopped = function(s, e) {
console.log("(sessionStopped)");
};
transcriber.canceled = function(s, e) {
console.log("(canceled)");
};
transcriber.transcribed = function(s, e) {
console.log("(transcribed) text: " + e.result.text);
console.log("(transcribed) speakerId: " + e.result.speakerId);
};

// Begin meeting transcription
transcriber.startTranscribingAsync(
function () { },
function (err) {
console.trace("err - starting transcription: " + err);
});
},
function (err) {
console.trace("err - adding user2: " + err);
console.trace("err - adding user1: " + err);
});
},
function (err) {
console.trace("err - " + err);
console.trace("err - adding user2: " + err);
});

}());
},
function (err) {
console.trace("err - " + err);
});
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,14 @@
"version": "1.0.0",
"description": "Quickstart for the Microsoft Speech SDK on Node.js",
"main": "index.js",
"type": "module",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Microsoft",
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
"microsoft-cognitiveservices-speech-sdk": "^1.33.0"
"microsoft-cognitiveservices-speech-sdk": "^1.33.1"
}
}
Loading