Skip to content

Commit

Permalink
Update dependencies: add dompurify package
Browse files Browse the repository at this point in the history
  • Loading branch information
aeltorio committed Oct 9, 2024
1 parent 1b44645 commit dcdc954
Show file tree
Hide file tree
Showing 4 changed files with 107 additions and 39 deletions.
25 changes: 25 additions & 0 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
"@sctg/ai-sdk": "0.0.4",
"@sctg/sentencepiece-js": "^1.3.3",
"core-js": "^3.38.1",
"dompurify": "^3.1.7",
"es6-promise": "^4.2.8",
"react": "^18.3.1",
"react-dom": "^18.3.1",
Expand All @@ -44,6 +45,7 @@
"@babel/core": "^7.25.7",
"@babel/plugin-syntax-import-attributes": "^7.25.7",
"@babel/preset-typescript": "^7.25.7",
"@types/dompurify": "^3.0.5",
"@types/office-js": "^1.0.434",
"@types/office-runtime": "^1.0.35",
"@types/react": "^18.3.11",
Expand Down
79 changes: 51 additions & 28 deletions src/aipane/aipane.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@ import { AI } from "@sctg/ai-sdk";
import config from "../config.json" with { type: "json" };
import type { AIAnswer, AIModel, AIPrompt, AIProvider } from "./AIPrompt.js";
import { SentencePieceProcessor, cleanText, llama_3_1_tokeniser_b64 } from "@sctg/sentencepiece-js";
import { Model, ModelListResponse } from "@sctg/ai-sdk/resources/models.js";
import { Model } from "@sctg/ai-sdk/resources/models.js";
import DOMPurify from "dompurify";

const TOKEN_MARGIN: number = 20; // Safety margin for token count
const ERROR_MESSAGE: string = "Error: Unable to insert AI answer.";
Expand Down Expand Up @@ -98,7 +99,13 @@ async function aiRequest(
*/
function getPrompt(id: string): AIPrompt {
const prompts: AIPrompt[] = config.prompts;
return prompts.find((prompt) => prompt.id === id && prompt.standalone !== isOutlookClient()) || prompts[0];
const prompt: AIPrompt | undefined = prompts.find(
(prompt) => prompt.id === id && prompt.standalone !== isOutlookClient()
);
if (!prompt) {
throw new Error("Prompt not found");
}
return prompt;
}

/**
Expand All @@ -119,6 +126,12 @@ export async function insertAIAnswer(
): Promise<AIAnswer> {
const { system, user } = getPrompt(id);
let error: string | null = ERROR_MESSAGE;

// Validate required inputs (throws if any is missing; no sanitization happens here)
if (!system || !user || !userText) {
throw new Error("Invalid input");
}

try {
console.log(`Prompt: ${id}`);
console.log(`System text: \n${system}`);
Expand All @@ -132,11 +145,14 @@ export async function insertAIAnswer(
// Replace newlines with HTML line breaks
aiText = aiText.replace(/\n/g, "<br>");

// Sanitize the AI-generated HTML with DOMPurify to strip unsafe markup (sanitizes, does not escape)
const sanitizedAiText = DOMPurify.sanitize(aiText);

// Insert the AI-generated text into the email body
if (isOutlookClient()) {
error = null;
Office.context.mailbox.item?.body.setSelectedDataAsync(
aiText,
sanitizedAiText,
{ coercionType: Office.CoercionType.Html },
(asyncResult: Office.AsyncResult<void>) => {
if (asyncResult.status === Office.AsyncResultStatus.Failed) {
Expand All @@ -145,7 +161,7 @@ export async function insertAIAnswer(
}
);
}
return { response: aiText, error };
return { response: sanitizedAiText, error };
} catch (err) {
console.error("Error: " + err);
return { response: "", error };
Expand All @@ -166,32 +182,39 @@ export async function getAIModels(provider: AIProvider, apiKey: string, filter:
active?: boolean;
}

const proxyUrl: string = config.aiproxy.host;
const ai: AI = new AI({
baseURL: provider.baseUrl,
basePath: provider.basePath,
disableCorsCheck: false,
apiKey,
dangerouslyAllowBrowser: true,
proxy: provider.aiproxied ? proxyUrl : undefined,
});
try {
const proxyUrl = config.aiproxy.host;
const ai = new AI({
baseURL: provider.baseUrl,
basePath: provider.basePath,
disableCorsCheck: false,
apiKey,
dangerouslyAllowBrowser: true,
proxy: provider.aiproxied ? proxyUrl : undefined,
});

const returnedModels: AIModel[] = [];
const models: ModelListResponse = await ai.models.list();
const filteredModels: ExtendedModel[] = models.data.filter(
(model: ExtendedModel) => model.id.includes(filter) && model.active
);
const orderedModels: ExtendedModel[] = filteredModels.sort((a, b) => b.created - a.created);
orderedModels.forEach((model) => {
returnedModels.push({
id: model.id,
name: model.id,
default: false,
max_tokens: model.context_window || 2048,
const models = await ai.models.list();
const filteredModels = models.data.filter(
(model: ExtendedModel) => model.id.includes(filter) && model.active
) as ExtendedModel[];
const orderedModels: ExtendedModel[] = filteredModels.sort((a, b) => b.created - a.created);

const returnedModels: AIModel[] = [];
orderedModels.forEach((model) => {
returnedModels.push({
id: model.id,
name: model.id,
default: false,
max_tokens: model.context_window || 2048,
});
});
});
returnedModels[0].default = true;
return returnedModels;
returnedModels[0].default = true;

return returnedModels;
} catch (error) {
console.error("Error retrieving AI models:", error);
throw error;
}
}

/**
Expand Down
40 changes: 29 additions & 11 deletions src/aipane/components/TextInsertion.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
*/

import * as React from "react";
import { useRef, useState } from "react";
import { Button, Field, Text, Textarea, tokens, makeStyles } from "@fluentui/react-components";
import { useState } from "react";
import { Button, Field, Textarea, tokens, makeStyles, Skeleton, SkeletonItem } from "@fluentui/react-components";
import { AIAnswer } from "../AIPrompt";
import Markdown from "react-markdown";
import rehypeHighlight from "rehype-highlight";
Expand Down Expand Up @@ -53,10 +53,19 @@ const useStyles = makeStyles({
textAreaBox: {
height: "27vh",
},
text: {
width: "100%",
whiteSpace: "pre-wrap",
overflowWrap: "break-word",
skeleton: {
display: "inherit",
width: "50vw",
},
skeletonOff: {
display: "none",
},
skeletonItem: {
margin: "0.5em",
},
markdown: {
display: "block",
maxWidth: "1024px",
},
});

Expand All @@ -66,16 +75,18 @@ const useStyles = makeStyles({
* @returns {React.JSX.Element} - The rendered component.
*/
const TextInsertion: React.FC<TextInsertionProps> = (props: TextInsertionProps): React.JSX.Element => {
const textRef = useRef<HTMLTextAreaElement>(null);
const [text, setText] = useState<string>(props.basePrompt || "");
const [skeletonVisibility, setSkeletonVisibility] = useState<boolean>(false);
const [answer, setAnswer] = useState<string>(null);

/**
* Handles the insertion of AI-generated text.
*/
const handleTextInsertion = async () => {
setSkeletonVisibility(true);
const answer = await props.insertAIAnswer(text);
if (answer.error && textRef.current) {
setSkeletonVisibility(false);
if (answer.error) {
//textRef.current.innerHTML = `Error: ${answer.error}<br/>Answer: ${answer.response}`;
setAnswer(`${answer.error} \nAnswer: \n${answer.response.replace(/<br\/>/g, "\n").replace(/<br>/g, "\n")}`);
}
Expand All @@ -100,10 +111,17 @@ const TextInsertion: React.FC<TextInsertionProps> = (props: TextInsertionProps):
<Button appearance="primary" size="large" onClick={handleTextInsertion}>
Insert answer
</Button>
<Markdown rehypePlugins={[rehypeHighlight]}>{answer}</Markdown>
<Text ref={textRef} size={200} className={styles.text}>
<div>
<Skeleton aria-label="Loading Content" className={skeletonVisibility ? styles.skeleton : styles.skeletonOff}>
<SkeletonItem className={styles.skeletonItem} />
<SkeletonItem className={styles.skeletonItem} />
<SkeletonItem className={styles.skeletonItem} />
</Skeleton>
<Markdown className={styles.markdown} rehypePlugins={[rehypeHighlight]}>
{answer}
</Markdown>
&nbsp;
</Text>
</div>
</div>
);
};
Expand Down

0 comments on commit dcdc954

Please sign in to comment.