Skip to content

Commit e687685

Browse files
committed
updated stream handler for narrator
1 parent b58db16 commit e687685

File tree

9 files changed

+95
-78
lines changed

9 files changed

+95
-78
lines changed

reporter-input-response/reporter-API-handler.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
import { v4 as UUID } from "uuid";
22

3-
import { gVars } from "./utils/global-variables.js";
3+
import { gVars } from "./reporter-global-variables.js";
44

55
import { getBusyMessage } from "./utils/busy-message.js";
66
import { initializeAssistant } from "./utils/initialize-assistants.js";
77
import { initializeImageAssistant } from "./utils/initialize-image-assistant.js";
88
import { callNarrator } from "./utils/call-narrator.js";
9+
// import { streamNarrator } from "./utils/stream.js";
910
import { callImageGenerator } from "./utils/call-image-generator.js";
1011

1112
async function reporterAPIHandler(body) {
@@ -42,6 +43,7 @@ async function reporterAPIHandler(body) {
4243
gVars.busy = false;
4344
return { message: "assistants initialized" };
4445
case "respond to action":
46+
// await streamNarrator(body.message);
4547
await callNarrator(body.message); // this updates gVars.message
4648
gVars.busy = false;
4749
return { message: gVars.message };

reporter-input-response/utils/global-variables.js renamed to reporter-input-response/reporter-global-variables.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ import OpenAI from "openai";
22
import {
33
prompt_create_narrative_response,
44
prompt_create_image_generation_prompt,
5-
} from "./prompts.js";
5+
} from "./utils/prompts.js";
66
import dotenv from "dotenv";
77

88
dotenv.config();
@@ -16,6 +16,7 @@ const openai = new OpenAI({
1616
let thread;
1717
let messages, message;
1818
let run;
19+
let stream;
1920
let narratorAssistant;
2021
var assistantInitialized = false;
2122

@@ -34,6 +35,7 @@ let gVars = {
3435
messages: messages,
3536
message: message,
3637
run: run,
38+
stream: stream,
3739
assistantInitialized: assistantInitialized,
3840
narratorAssistant: narratorAssistant,
3941
prompt_create_narrative_response: prompt_create_narrative_response,
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
import { gVars } from "./reporter-global-variables.js";

/**
 * Express handler that streams the narrator assistant's reply to the client
 * over Server-Sent Events (SSE).
 *
 * Expects `req.body.chatInput` (string): the user's message. The message is
 * appended to the shared OpenAI thread (`gVars.thread`) and a streaming run is
 * started against `gVars.narratorAssistant`. Each text delta is forwarded to
 * the client as an SSE `data:` event; the stream is terminated with a
 * `data: [DONE]` sentinel.
 *
 * NOTE(review): assumes `gVars.thread` and `gVars.narratorAssistant` were
 * already initialized (via the "initialize" action on /reporter) before this
 * endpoint is hit — confirm callers guarantee that ordering.
 *
 * @param {import("express").Request} req
 * @param {import("express").Response} res
 */
async function reporterStreamHandler(req, res) {
  const { chatInput } = req.body;

  res.setHeader("Content-Type", "text/event-stream");
  res.setHeader("Cache-Control", "no-cache");
  res.flushHeaders(); // flush the headers to establish SSE with client

  // Emit one chunk as a well-formed SSE event. A raw newline inside a
  // `data:` payload would terminate the event early (SSE is line-oriented),
  // so multi-line chunks are sent as consecutive `data:` lines, which a
  // conforming client rejoins with "\n".
  const writeEvent = (text) => {
    const payload = text
      .split("\n")
      .map((line) => `data: ${line}`)
      .join("\n");
    res.write(`${payload}\n\n`);
  };

  try {
    let count = 0;
    // The narrator tends to prefix its reply with "Response: "; trim that
    // prefix even when it arrives split across the first few deltas.
    let messageStart = "Response: ";
    await gVars.openai.beta.threads.messages.create(gVars.thread.id, {
      role: "user",
      content: chatInput,
    });

    gVars.openai.beta.threads.runs
      .stream(gVars.thread.id, {
        assistant_id: gVars.narratorAssistant.id,
      })
      .on("textDelta", (textDelta) => {
        if (count < 3) {
          count++;
          // Consume as much of the remaining expected prefix as this delta
          // matches, and forward only the rest.
          let i = 0;
          while (
            i < textDelta.value.length &&
            i < messageStart.length &&
            textDelta.value[i] === messageStart[i]
          ) {
            i++;
          }
          messageStart = messageStart.slice(i);
          const slicedValue = textDelta.value.slice(i);
          if (slicedValue) {
            writeEvent(slicedValue);
          }
        } else {
          writeEvent(textDelta.value);
        }
      })
      .on("end", () => {
        res.write("data: [DONE]\n\n");
        res.end();
      })
      .on("error", (err) => {
        console.error("Error in stream:", err);
        res.end();
      });
  } catch (error) {
    console.error("Error:", error);
    // Headers were already flushed for SSE, so a 500 status can no longer be
    // sent once streaming has begun; setting it would throw
    // ERR_HTTP_HEADERS_SENT. Fall back to just ending the response.
    if (res.headersSent) {
      res.end();
    } else {
      res.status(500).send("An error occurred");
    }
  }
}

export { reporterStreamHandler };

reporter-input-response/utils/call-image-generator.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { gVars } from "./global-variables.js";
1+
import { gVars } from "../reporter-global-variables.js";
22

33
async function callImageGenerator() {
44
await gVars.openai.beta.threads.messages.create(gVars.imageThread.id, {

reporter-input-response/utils/call-narrator.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { gVars } from "./global-variables.js";
1+
import { gVars } from "../reporter-global-variables.js";
22

33
async function callNarrator(chatInput) {
44
await gVars.openai.beta.threads.messages.create(gVars.thread.id, {

reporter-input-response/utils/initialize-assistants.js

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { gVars } from "./global-variables.js";
1+
import { gVars } from "../reporter-global-variables.js";
22

33
async function initializeAssistant() {
44
gVars.narratorAssistant = await gVars.openai.beta.assistants.create({
@@ -40,4 +40,5 @@ async function initializeAssistant() {
4040
gVars.assistantInitialized = true;
4141
console.log("assistant initialization complete");
4242
}
43+
4344
export { initializeAssistant };

reporter-input-response/utils/initialize-image-assistant.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { gVars } from "./global-variables.js";
1+
import { gVars } from "../reporter-global-variables.js";
22

33
async function initializeImageAssistant() {
44
gVars.imageAssistant = await gVars.openai.beta.assistants.create({

reporter-input-response/utils/stream.js

Whitespace-only changes.

server.js

Lines changed: 7 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
import express from "express";
22
import cors from "cors";
33
import dotenv from "dotenv";
4-
import fetch from "node-fetch";
54
import { megaSearch } from "./mega-search/mega-search.js";
65
import { getFlashcards } from "./flashcards/flashcards.js";
76
import { reporterAPIHandler } from "./reporter-input-response/reporter-API-handler.js";
7+
import { reporterStreamHandler } from "./reporter-input-response/reporter-stream-handler.js";
88

99
const app = express();
1010

@@ -46,90 +46,25 @@ app.post("/flashcards/v1", async (req, res) => {
4646
message: "Access code accepted",
4747
cardSets: await getFlashcards("v1"),
4848
};
49-
// console.log(responseData);
5049
res.json(responseData);
5150
} else {
5251
res.status(401).send("Unauthorized");
5352
}
5453
});
5554

55+
// Streaming narrator endpoint: replies over Server-Sent Events.
app.post("/reporter/stream", async (req, res) => {
  console.log("post call to /reporter/stream");
  console.log(req.body);
  // Await the handler so rejections surface in this async callback instead
  // of leaving a floating promise with an unhandled rejection.
  await reporterStreamHandler(req, res);
});

// Non-streaming reporter endpoint: one JSON response per request.
app.post("/reporter", async (req, res) => {
  console.log("post call to /reporter");
  console.log(req.body);
  const response = await reporterAPIHandler(req.body);
  res.json(response);
});
6267

63-
app.post("/test/chat", async (req, res) => {
64-
const { message } = req.body;
65-
66-
const response = await fetch("https://api.openai.com/v1/chat/completions", {
67-
method: "post",
68-
headers: {
69-
"Content-Type": "application/json",
70-
Authorization: `Bearer ${process.env.REPORTER_OPENAI_API_KEY}`,
71-
},
72-
body: JSON.stringify({
73-
model: "gpt-3.5-turbo",
74-
messages: [{ role: "user", content: message }],
75-
stream: true,
76-
}),
77-
});
78-
79-
res.setHeader("Content-Type", "text/event-stream");
80-
res.setHeader("Cache-Control", "no-cache");
81-
82-
// Initialize variables to handle streaming data
83-
let buffer = "";
84-
85-
response.body.on("data", (chunk) => {
86-
buffer += chunk.toString();
87-
88-
// Split the buffer by newlines
89-
const lines = buffer.split("\n");
90-
91-
// Keep the last partial line in the buffer
92-
buffer = lines.pop();
93-
94-
for (const line of lines) {
95-
const message = line.trim();
96-
97-
// Ignore empty lines
98-
if (!message) continue;
99-
100-
// Stream finished
101-
if (message === "data: [DONE]") {
102-
return res.end();
103-
}
104-
105-
if (message.startsWith("data: ")) {
106-
const jsonString = message.replace("data: ", "");
107-
try {
108-
const parsed = JSON.parse(jsonString);
109-
const content = parsed.choices[0].delta.content;
110-
if (content) {
111-
console.log(content);
112-
if (content == " ") console.log("space");
113-
res.write(`data: ${content}\n\n`);
114-
}
115-
} catch (error) {
116-
// Handle JSON parsing errors
117-
console.error("Error parsing JSON:", error);
118-
}
119-
}
120-
}
121-
});
122-
123-
response.body.on("end", () => {
124-
res.end();
125-
});
126-
127-
response.body.on("error", (error) => {
128-
console.error("Error with OpenAI API stream:", error);
129-
res.end();
130-
});
131-
});
132-
13368
const PORT = process.env.PORT || 3000;
13469
app.listen(PORT, () => {
13570
console.log(`Server is running on port ${PORT}`);

0 commit comments

Comments
 (0)