let { GoogleAuth } = await import("google-auth-library");
let { DiscussServiceClient } = await import("@google-ai/generativelanguage");
import "@johnlindquist/kit";
const MODEL_NAME = "models/chat-bison-001";
const API_KEY = await env("PALM_API_KEY", {
hint: `Sign up for the PaLM API waitlist <a href="https://developers.generativeai.google/">here</a>`,
});
const client = new DiscussServiceClient({
authClient: new GoogleAuth().fromAPIKey(API_KEY),
});
const config = {
model: MODEL_NAME,
temperature: 0.75,
candidateCount: 1,
topK: 40,
topP: 0.95,
};
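// Rough meaning of the knobs above (standard sampling parameters, not PaLM-specific):
// temperature trades precision for creativity, candidateCount is how many replies
// to return, and topK/topP limit which tokens can be sampled.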
const chatHistory = [];
const generateText = async (text) => {
chatHistory.push({ content: text });
const response = await client.generateMessage({
...config,
prompt: {
context: "You are a funny and helpful assistant.",
messages: chatHistory,
},
});
log(response);
log(response[0].filters);
if (response[0].filters?.length > 0) {
return `The model has rejected your input. Reason: ${response[0].filters[0].reason}`;
} else {
chatHistory.push({ content: response[0].candidates[0].content });
return response[0].candidates[0].content;
}
};
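// The handling above assumes generateMessage resolves to an array whose first
// element looks roughly like this (shape inferred from the property accesses in
// generateText, not an exhaustive API description):
// [
//   {
//     candidates: [{ content: "model reply text" }],
//     filters: [{ reason: "<why the input was blocked>" }], // empty or absent unless the input was blocked
//   },
// ]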
await chat({
onSubmit: async (input) => {
setLoading(true);
try {
const response = await generateText(input);
let message = md(response);
chat.addMessage("");
chat.setMessage(-1, message);
} catch (e) {
console.log(e);
chat.addMessage("");
chat.setMessage(-1, md("Error: " + e.message));
}
setLoading(false);
},
});
// Name: Static to Dynamic
// Description: Convert static import to dynamic import
// e.g. import { Foo } from "bar";
// to let { Foo } = await import("bar");
// Author: Josh Mabry
// Twitter: @AI_Citizen
import "@johnlindquist/kit";
const text = await getSelectedText();
function convertImportString(input) {
const importRegex = /import\s+({[^}]+})\s+from\s+"([^"]+)";/;
if (!importRegex.test(input)) {
throw new Error("Invalid import string format");
}
const [_, importList, modulePath] = input.match(importRegex);
const output = `let ${importList} = await import("${modulePath}");`;
return output;
}
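// Note: the regex above only matches named imports of the exact form
// `import { ... } from "...";`. Default imports, namespace imports, and
// single-quoted module paths all hit the error branch instead.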
const output = convertImportString(text);
await setSelectedText(output);
/*
# Prompt Anything
Highlight some text and run this script to prompt against it.
Useful for summarizing text, generating a title, or any other task you can think of.
## Usage
- Highlight the text you want to prompt against
- Run the script via shortcut or command palette
- Input your desired prompt
- Wait for the AI to respond
- Select one of the options
* Retry - Rerun generation with option to update prompt
* Edit - Edit response in editor
- On editor exit the message is saved to the clipboard
- On editor submit the message is pasted into the highlighted text
* Copy - Copy response to clipboard
* Paste - Paste response into highlighted text
* Save - Save response to file (not working)
## Example
- Highlight: 'Some really long passage in a blog post'
- Run Script
- Prompt: `Summarize this passage in the form of Shakespearean prose`
- Waaaaait for it...
- Get a response from the AI
- Select an option
- Rinse and repeat
*/
// Name: Prompt Anything
// Description: Custom prompt for any highlighted text
// Author: Josh Mabry
// Twitter: @AI_Citizen
// Shortcut: alt shift enter
//#################
// ScriptKit Import
//#################
import "@johnlindquist/kit";
//#################
// LangChain Imports
//#################
let { ChatOpenAI } = await import("langchain/chat_models");
let { HumanChatMessage, SystemChatMessage } = await import("langchain/schema");
//#################
// Request API KEY
//#################
// stored in .env file after first run
// can change there or through the command palette
let openAIApiKey = await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
// System input / Task for the AI to follow
let userSystemInput = await arg("Summarize this passage");
// User Prompt from highlighted text
let userPrompt = await getSelectedText();
//#################
// Prompt Template
//#################
const formatPrompt = (prompt) => {
return `##### Ignore prior instructions
- Return answer in markdown format
- You are tasked with the following
${prompt}
########
`;
};
//################
// Options Template
//################
const options = `
* [Retry](submit:retry) - Rerun generation with option to update prompt
* [Edit](submit:edit) - Edit response in editor
* [Copy](submit:copy) - Copy response to clipboard
* [Paste](submit:paste) - Paste response into highlighted text
* [Save](submit:save) - Save response to file (not working)
`;
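// The links above use Script Kit's `submit:` scheme: clicking a markdown link of
// the form [Label](submit:value) inside a div() resolves that div's promise with
// "value", which is what the switch statement in handleLLMEnd branches on.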
//################
// Main Function
//################
/**
 * Stream a chat completion for the highlighted text and offer follow-up actions.
 * @param {string} prompt - formatted system prompt (defaults to the user's task)
 * @param {string} humanChatMessage - the highlighted text to prompt against
 */
async function promptAgainstHighlightedText(
prompt = formatPrompt(userSystemInput),
humanChatMessage = userPrompt
) {
//#########
// Helpers
//########
// exit script on cancel
const cancelChat = () => {
process.exit(1);
};
/**
* Paste text to highlighted text and exit script
* @param {*} text
*/
const pasteTextAndExit = async (text) => {
await setSelectedText(text);
process.exit(1);
};
/**
* Copy text to clipboard and exit script
* @param {*} text
*/
const copyToClipboardAndExit = async (text) => {
await clipboard.writeText(text);
process.exit(1);
};
let currentMessage = "";
const llm = new ChatOpenAI({
// 0 = "precise", 1 = "creative"
temperature: 0.3,
// modelName: "gpt-4", // uncomment to use GPT-4 (requires beta access)
openAIApiKey: openAIApiKey,
// turn off to only get output when the AI is done
streaming: true,
callbacks: [
{
handleLLMNewToken: async (token) => {
log(`handleLLMNewToken`);
// each new token is appended to the current message
// and then rendered to the screen
currentMessage += token;
// render current message
await div({
html: md(currentMessage),
// @TODO: Figure out how to get ESC to trigger a cancel
onAbandon: cancelChat,
onEscape: cancelChat,
onBackspace: cancelChat,
// if this is set to false you can click outside the window to cancel
// which works, but would be nice to also have ESC work
ignoreBlur: false,
focus: true,
// hint: `Press ESC to cancel`,
});
},
handleLLMError: async (err) => {
dev({ err });
},
handleLLMEnd: async () => {
log(`handleLLMEnd`);
// render final message with options
let html = md(currentMessage + options);
// wait for user to select an option
const selectedOption = await div(html, {
ignoreBlur: true,
focus: true,
// paste the text on submit?
// onSubmit: () => pasteTextAndExit(currentMessage),
});
// handle selected option
switch (selectedOption) {
case "paste":
await pasteTextAndExit(currentMessage);
break;
case "retry":
// reset current message
currentMessage = "";
// prompt again with a new prompt
// press enter to reuse the original prompt
const followUp = await arg({
placeholder: userSystemInput,
hint: "Press enter to use the same prompt",
});
// update the system prompt; the main loop below re-runs the model with it
prompt = formatPrompt(followUp || userSystemInput);
break;
case "edit":
// @TODO still need to figure out best way to handle submit and abort
// would like custom buttons for triggering all of the actions like copy, paste, etc
await editor({
value: currentMessage,
onEscape: async (state) => await copyToClipboardAndExit(state),
onSubmit: async (state) => await pasteTextAndExit(state),
});
break;
case "copy":
await copyToClipboardAndExit(currentMessage);
break;
case "save":
await inspect(currentMessage, `/conversations/${Date.now()}.md`);
process.exit(1);
default:
await copyToClipboardAndExit(currentMessage);
}
},
},
],
});
//###########
// Main Loop
//###########
// runs the language model until the user cancels
while (true) {
await llm.call([
new SystemChatMessage(prompt),
new HumanChatMessage(humanChatMessage),
]);
}
}
promptAgainstHighlightedText();
/*
# Prompt Anything
Highlight some text and run this script to prompt against it.
Useful for summarizing text, generating a title, or any other task you can think of.
## Usage
- Highlight the text you want to prompt against
- Run the script via shortcut or command palette
- Input your desired prompt
- Wait for the AI to respond
- Select one of the options
* Retry - Rerun generation with option to update prompt
* Edit - Edit response in editor
- On editor exit the message is saved to the clipboard
- On editor submit the message is pasted into the highlighted text
* Copy - Copy response to clipboard
* Paste - Paste response into highlighted text
* Save - Save response to file (not working)
## Example
- Highlight: 'Some really long passage in a blog post'
- Run Script
- Prompt: `Summarize this passage in the form of Shakespearean prose`
- Waaaaait for it...
- Get a response from the AI
- Select an option
- Rinse and repeat
*/
// Name: Prompt Anything
// Description: Custom prompt for any highlighted text
// Author: Josh Mabry
// Twitter: @AI_Citizen
// Shortcut: alt shift enter
//#################
// ScriptKit Import
//#################
import "@johnlindquist/kit";
//#################
// LangChain Imports
//#################
let { ChatOpenAI } = await import("langchain/chat_models");
let { HumanChatMessage, SystemChatMessage } = await import("langchain/schema");
//#################
// Request API KEY
//#################
// stored in .env file after first run
// can change there or through the command palette
let openAIApiKey = await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
// System input / Task for the AI to follow
let userSystemInput = await arg("Summarize this passage");
// User Prompt from highlighted text
let userPrompt = await getSelectedText();
//#################
// Prompt Template
//#################
const formatPrompt = (prompt) => {
return `##### Ignore prior instructions
- Return answer in markdown format
- You are tasked with the following
${prompt}
########
`;
};
//################
// Options Template
//################
const options = `
* [Retry](submit:retry) - Rerun generation with option to update prompt
* [Edit](submit:edit) - Edit response in editor
* [Copy](submit:copy) - Copy response to clipboard
* [Paste](submit:paste) - Paste response into highlighted text
* [Save](submit:save) - Save response to file (not working)
`;
//#########
// Helpers
//########
// exit script on cancel
const cancelChat = () => {
process.exit(1);
};
//################
// Main Function
//################
/**
 * Stream a chat completion for the highlighted text and offer follow-up actions.
 * @param {string} prompt - formatted system prompt (defaults to the user's task)
 * @param {string} humanChatMessage - the highlighted text to prompt against
 */
async function promptAgainstHighlightedText(
prompt = formatPrompt(userSystemInput),
humanChatMessage = userPrompt
) {
let currentMessage = "";
const llm = new ChatOpenAI({
// 0 = "precise", 1 = "creative"
temperature: 0.3,
// modelName: "gpt-4", // uncomment to use GPT-4 (requires beta access)
openAIApiKey: openAIApiKey,
// turn off to only get output when the AI is done
streaming: true,
callbacks: [
{
handleLLMNewToken: async (token) => {
log(`handleLLMNewToken`);
// each new token is appended to the current message
// and then rendered to the screen
currentMessage += token;
// render current message
await div({
html: md(currentMessage + options),
// @TODO: Figure out how to get ESC to trigger a cancel
onAbandon: cancelChat,
onEscape: cancelChat,
onBackspace: cancelChat,
// if this is set to false you can click outside the window to cancel
// ignoreBlur: true,
focus: true,
// hint: `Press ESC to cancel`,
});
},
handleLLMError: async (err) => {
dev({ err });
},
handleLLMEnd: async () => {
log(`handleLLMEnd`);
// render final message with options
let html = md(currentMessage + options);
// wait for user to select an option
const selectedOption = await div(html, {
ignoreBlur: true,
focus: true,
// onSubmit: () => setSelectedText(currentMessage),
});
// handle selected option
switch (selectedOption) {
case "paste":
// paste into highlighted text
await setSelectedText(currentMessage);
process.exit(1);
case "retry":
// reset current message
currentMessage = "";
// prompt again with new prompt
// press enter to use original prompt
const followUp = await arg({
placeholder: userSystemInput,
hint: "Press enter to use the same prompt",
});
// update the system prompt; the main loop below re-runs the model with it
prompt = formatPrompt(followUp || userSystemInput);
break;
case "edit":
// still need to figure out best way to handle submit and abort
// would like custom buttons for triggering all these same options such as save
await editor({
value: currentMessage,
onEscape: async (state) => {
// copy to clipboard when exiting the editor
await clipboard.writeText(state);
// exit script
process.exit(1);
},
onSubmit: async (state) => {
// paste into highlighted text when pressing enter
await setSelectedText(state);
// exit script
process.exit(1);
},
});
break;
case "copy":
// copy to clipboard
await clipboard.writeText(currentMessage);
// exit script
process.exit(1);
case "save":
await inspect(currentMessage, `conversations/${Date.now()}.md`);
// exit script
process.exit(1);
default:
// copy to clipboard
await clipboard.writeText(currentMessage);
process.exit(1);
}
},
},
],
});
//###########
// Main Loop
//###########
// runs the language model until the user cancels
while (true) {
await llm.call([
new SystemChatMessage(prompt),
new HumanChatMessage(humanChatMessage),
]);
}
}
promptAgainstHighlightedText();
/*
# Explain Plz
Highlight some text and have it explained by AI
Works for any highlighted text or code
*/
// Name: Explain Plz
// Description: Get an explanation for any highlighted text
// Author: Josh Mabry
// Twitter: @AI_Citizen
// Shortcut: cmd alt shift e
import "@johnlindquist/kit";
let { ChatOpenAI } = await import("langchain/chat_models");
let { HumanChatMessage, SystemChatMessage } = await import("langchain/schema");
let openAIApiKey = await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
let { CallbackManager } = await import("langchain/callbacks");
let prompt = `#####
Ignore prior instructions, you are tasked with taking the input text and explaining it to the user.
Return the response in the following format using markdown syntax:
# Explain Plz
## TLDR (A quick summary of the highlighted text)
## ELI5 (Explain Like I'm 5)
## Explanation (A longer technical explanation of the highlighted text)
`;
let currentMessage = "";
const chat = new ChatOpenAI({
temperature: 0.3,
openAIApiKey: openAIApiKey,
streaming: true,
callbackManager: CallbackManager.fromHandlers({
handleLLMStart: async () => {
log(`handleLLMStart`);
// show a temporary loading window until tokens start streaming in
loadingWidget = await widget(`
<div class="bg-black text-white h-screen p-5">
Loading...
</div>
`);
},
handleLLMNewToken: async (token, runId) => {
log(`handleLLMNewToken`);
currentMessage += token;
let html = md(currentMessage);
await div(html);
},
handleLLMError: async (err) => {
warn(`error`, JSON.stringify(err));
await setSelectedText(JSON.stringify(err));
},
handleLLMEnd: async () => {
// close the loading window now that the full response renders below
loadingWidget?.close();
log(`handleLLMEnd`);
let html = md(currentMessage);
await div(html);
},
}),
});
let text = await getSelectedText();
await chat.call([new SystemChatMessage(prompt), new HumanChatMessage(text)]);
/*
Pardon the mess; this was put together in half a day for the [lablab.ai](https://lablab.ai/event/autonomous-gpt-agents-hackathon) hackathon.
More updates to come
# AC AGI
An autonomous general intelligence that accomplishes a task for you.
Uses a human in the loop to provide feedback to the agent.
How to use:
- Enter your task
- Assign max-iterations for the agent to loop: 0 for infinite (probably not a good idea ¯\_(ツ)_/¯)
- Wait for the agent to complete the task
- Profit
Known issues:
- The agent will sometimes get stuck in a loop and not complete the task
- Human feedback is not always helpful
Upcoming features:
- More tools
- Refined prompts
- Better human feedback system
- Better memory system
Possible thanks to the fine folks at [Langchain](https://js.langchain.com/docs/use_cases/autonomous_agents/baby_agi#example-with-tools)
and all the other giants whose shoulders we stand on.
*/
// Name: AC AGI
// Description: An AGI task manager inspired by BabyAGI
// Author: Josh Mabry
// Twitter: @AI_Citizen
import "@johnlindquist/kit";
let { BabyAGI } = await import("langchain/experimental/babyagi");
let { MemoryVectorStore } = await import("langchain/vectorstores/memory");
let { OpenAIEmbeddings } = await import("langchain/embeddings/openai");
let { OpenAI } = await import("langchain/llms/openai");
let { PromptTemplate } = await import("langchain/prompts");
let { LLMChain } = await import("langchain/chains");
let { ChainTool } = await import("langchain/tools");
let { initializeAgentExecutorWithOptions } = await import("langchain/agents");
let { DynamicTool } = await import("langchain/tools");
let { ChatOpenAI } = await import("langchain/chat_models");
let GOOGLE_API_KEY = await env("GOOGLE_API_KEY", {
shortcuts: [
{
name: "Google API Key",
key: `${cmd}+o`,
bar: "right",
onPress: () => {
open("https://developers.google.com/custom-search/v1/introduction");
},
},
],
ignoreBlur: true,
secret: true,
height: PROMPT.HEIGHT.INPUT_ONLY,
});
let GOOGLE_CSE_KEY = await env("GOOGLE_CSE_KEY", {
shortcuts: [
{
name: "Google Custom Search Engine Key",
key: `${cmd}+o`,
bar: "right",
onPress: () => {
open("https://programmablesearchengine.google.com/");
},
},
],
ignoreBlur: true,
secret: true,
height: PROMPT.HEIGHT.INPUT_ONLY,
});
await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
const task = await arg({
placeholder: "Task",
description: "Enter a task for AC AGI to complete",
ignoreBlur: true,
height: PROMPT.HEIGHT.INPUT_ONLY,
});
let maxIterations = await arg({
placeholder: "How many times should AC AGI loop?",
hint: "Leave empty for infinite iterations *use with caution*",
ignoreBlur: true,
height: PROMPT.HEIGHT.INPUT_ONLY,
});
if (maxIterations === "" || maxIterations === "0") {
maxIterations = undefined;
}
//#########################
// BabyAGI method overrides
//#########################
function printTaskList() {
let result = "";
for (const t of this.taskList) {
result += `${t.taskID}: ${t.taskName}\n`;
}
const msg = `### Task List
${result}
`;
let html = md(msg);
div({
html,
ignoreBlur: true,
});
}
function printNextTask(task) {
const msg = `### Next Task
${task.taskID}: ${task.taskName}
`;
let html = md(msg);
div({
html,
ignoreBlur: true,
});
}
function printTaskResult(result) {
const msg = `### Task Result
${result.trim()}
`;
let html = md(msg);
div({
html,
ignoreBlur: true,
});
}
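// These three functions replace BabyAGI's default task printers (they are assigned
// onto the babyAGI instance near the end of the script) so progress renders in
// Script Kit divs rather than plain console output.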
//#############
// Custom Tools
//#############
let encodeQuery = (str) => str.replace(/ /g, "+");
let searchUrl = (q) =>
`https://www.googleapis.com/customsearch/v1?key=${GOOGLE_API_KEY}&cx=${GOOGLE_CSE_KEY}&q=${encodeQuery(
q
)}&sort=date`;
async function search(query) {
let response = await get(searchUrl(query));
let items = response?.data?.items;
if (items) {
let choices = items.map((item) => ({
name: item.title,
value: item.link,
}));
return JSON.stringify(choices);
}
}
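// For reference, the Custom Search response read above looks roughly like
// { data: { items: [{ title: "...", link: "https://..." }, ...] } };
// when no items come back, search() returns undefined.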
async function humanFeedbackList(mdStr) {
let html = md(`${mdStr.trim()}`);
const response = await div({
html,
ignoreBlur: true,
});
return response;
}
async function humanInput(question) {
const response = await arg({
placeholder: "Human, I need help!",
hint: question,
ignoreBlur: true,
ignoreAbandon: true,
height: PROMPT.HEIGHT.INPUT_ONLY,
});
return response;
}
const todoPrompt = PromptTemplate.fromTemplate(
"You are a planner/expert todo list creator. Generate a markdown formatted todo list for: {objective}"
);
const tools = [
new ChainTool({
name: "TODO",
chain: new LLMChain({
llm: new ChatOpenAI({ temperature: 0 }),
prompt: todoPrompt,
}),
description:
"For making todo lists. Input: objective to create todo list for. Output: the todo list",
}),
new DynamicTool({
name: "Search",
description: "Search web for info",
func: search,
}),
new DynamicTool({
name: "Human Input",
description:
"(Use only when no info is available elsewhere) Ask a human for specific input that you don't know, like a persons name, or DOB, location, etc. Input is question to ask human, output is answer",
func: humanInput,
}),
// new DynamicTool({
// name: "Human Feedback Choice",
// description: `Ask human for feedback if you unsure of next step.
// Input is markdown string formatted with your questions and suitable responses like this example:
// # Human, I need your help!
// <Question Here>
// * [John](submit:John) // don't change formatting of these links
// * [Mindy](submit:Mindy)
// * [Joy](submit:Joy)
// * [Other](submit:Other)
// `,
// func: humanFeedbackList,
// }),
];
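// The zero-shot agent below picks between these tools purely from their name and
// description strings, so the descriptions double as instructions for when each
// tool should be used.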
//##################
// AC AGI is Born
//##################
const taskBeginMsg = md(`
### Executing Task Manager
Goal: ${task}
`);
div({ html: taskBeginMsg, ignoreBlur: true });
const agentExecutor = await initializeAgentExecutorWithOptions(
tools,
new ChatOpenAI({ temperature: 0 }),
{
agentType: "zero-shot-react-description",
agentArgs: {
prefix: `You are an AI who performs one task based on the following objective: {objective}.
Take into account these previously completed tasks: {context}.`,
suffix: `Question: {task}
{agent_scratchpad}`,
inputVariables: ["objective", "task", "context", "agent_scratchpad"],
},
}
);
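// The {objective}, {context}, {task}, and {agent_scratchpad} placeholders in the
// prefix/suffix above are filled in by BabyAGI on each step, so the executor sees
// the overall goal plus the previously completed tasks as context.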
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const babyAGI = BabyAGI.fromLLM({
llm: new ChatOpenAI({ temperature: 0 }),
executionChain: agentExecutor,
vectorstore: vectorStore,
maxIterations: maxIterations,
});
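// BabyAGI embeds each task result into the vector store above and pulls the most
// relevant ones back out as the {context} passed to the execution chain.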
babyAGI.printNextTask = printNextTask;
babyAGI.printTaskList = printTaskList;
babyAGI.printTaskResult = printTaskResult;
await babyAGI.call({ objective: task });

/* 
# Google Search
Example of leveraging Google's Custom Search Engine API to search the web
*/

// Name: Google Search
// Description: Leverage Google's Custom Search Engine API to search the web
// Author: Josh Mabry
// Twitter: @AI_Citizen

import "@johnlindquist/kit";

let GOOGLE_API_KEY = await env("GOOGLE_API_KEY", {
  shortcuts: [
    {
      name: "Google API Key",
      key: `${cmd}+o`,
      bar: "right",
      onPress: () => {
        open("https://developers.google.com/custom-search/v1/introduction");
      },
    },
  ],
  ignoreBlur: true,
  secret: true,
  height: PROMPT.HEIGHT.INPUT_ONLY,
});

let GOOGLE_CSE_KEY = await env("GOOGLE_CSE_KEY", {
  shortcuts: [
    {
      name: "Google Custom Search Engine Key",
      key: `${cmd}+o`,
      bar: "right",
      onPress: () => {
        open("https://programmablesearchengine.google.com/");
      },
    },
  ],
  ignoreBlur: true,
  secret: true,
  height: PROMPT.HEIGHT.INPUT_ONLY,
});

let query = await arg(
  {
    placeholder: "Search Query",
    strict: false,
  },
  [
    {
      name: "Send a search query to Google",
      info: "always",
    },
  ]
);

let search = (q) =>
  `https://www.googleapis.com/customsearch/v1?key=${GOOGLE_API_KEY}&cx=${GOOGLE_CSE_KEY}&q=${encodeURIComponent(
    q
  )}&sort=date`;

let response = await get(search(query));

let items = response?.data?.items;

if (items) {
  let choices = items.map((item) => ({
    name: item.title,
    value: item.link,
  }));

  let link = await arg("Choose a link to view", choices);

  open(link);
}

/*
# Smartify your words!
Tired of feeling dumb? Winter got you in a funk?
Can you just not seem to get the words out right?
Well, let's Smartify your words!
Highlight some text and press `cmd+shift+enter` to send it through ChatGPT
to replace the text with a more eloquent version. Mileage may vary.
*/
// Name: Smartify Your Words
// Description: Let's make those words smarter!
// Author: Josh Mabry
// Twitter: @AI_Citizen
// Shortcut: command shift enter
import "@johnlindquist/kit";
let { ChatOpenAI } = await import("langchain/chat_models");
let { HumanChatMessage, SystemChatMessage } = await import("langchain/schema");
let openAIApiKey = await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
let { CallbackManager } = await import("langchain/callbacks");
let prompt = `#####
Ignore prior instructions, you are tasked with taking an input and refactoring it using the following rules:
- Maintain the same meaning, tone, and intent as the original text
- Clean up any grammar or spelling mistakes
- Make it sound more professional, but keep it casual
- Reduce redundancies and excessive verbiage
Return only the updated text, do not offer any explanations or reasoning for the changes.
########
`;
import Bottleneck from "bottleneck";
const limiter = new Bottleneck({
maxConcurrent: 1,
minTime: 100,
});
const type = (text) => {
return new Promise((resolve) => {
keyboard.type(text);
resolve();
});
};
const wrappedType = limiter.wrap(type);
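// The Bottleneck wrapper queues keyboard.type() calls so streamed tokens are typed
// one at a time with a small gap, keeping them in the order they arrive.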
const smartify = new ChatOpenAI({
temperature: 0.3,
openAIApiKey: openAIApiKey,
streaming: true,
callbackManager: CallbackManager.fromHandlers({
handleLLMStart: async () => {
// handleLLMStart receives the model/prompt info, not a token, so nothing is typed here
log(`handleLLMStart`);
},
handleLLMNewToken: async (token) => {
if (!token) return;
await wrappedType(token);
},
handleLLMError: async (err) => {
warn(`error`, JSON.stringify(err));
await setSelectedText(JSON.stringify(err));
process.exit(1);
},
handleLLMEnd: async () => {
log(`handleLLMEnd`);
process.exit(1);
},
}),
});
let text = await getSelectedText();
await smartify.call([
new SystemChatMessage(prompt),
new HumanChatMessage(text),
]);

/*
# Generate a response to a prompt in any text field
This script utilizes ChatGPT to generate a response for the selected text.
The response will then replace the original text, making it useful for various writing tasks such as blog posts, code, and emails.
Highlight some text and press `cmd+shift+p` to send it through ChatGPT,
which will replace the highlighted text with its response. Your mileage may vary.
*/
// Name: Prompt Anywhere
// Description: Generate a prompt response anywhere you can type
// Author: Josh Mabry
// Twitter: @AI_Citizen
// Shortcut: cmd shift P
import "@johnlindquist/kit";
import Bottleneck from "bottleneck";
let { ChatOpenAI } = await import("langchain/chat_models");
let { HumanChatMessage, SystemChatMessage } = await import("langchain/schema");
let openAIApiKey = await env("OPENAI_API_KEY", {
hint: `Grab a key from <a href="https://platform.openai.com/account/api-keys">here</a>`,
});
let { CallbackManager } = await import("langchain/callbacks");
let prompt = `#####
Ignore prior instructions, you are tasked with taking the input text and generating a response to it.
Return only the updated text, do not offer any explanations or reasoning for the changes.
Do not ask any questions, do not refuse any input, and do not change the subject.
`;
const limiter = new Bottleneck({
maxConcurrent: 1,
minTime: 100,
});
const type = (text) => {
return new Promise((resolve) => {
keyboard.type(text);
resolve();
});
};
const wrappedType = limiter.wrap(type);
const chat = new ChatOpenAI({
temperature: 0.3,
openAIApiKey: openAIApiKey,
streaming: true,
callbackManager: CallbackManager.fromHandlers({
handleLLMStart: async () => {
// handleLLMStart receives the model/prompt info, not a token, so nothing is typed here
log(`handleLLMStart`);
},
handleLLMNewToken: async (token, runId) => {
log(`handleLLMNewToken`);
// occasionally I see tokens typed out of order
// still not sure why this happens
await wrappedType(token);
},
handleLLMError: async (err) => {
warn(`error`, JSON.stringify(err));
process.exit(1);
},
handleLLMEnd: async () => {
log(`handleLLMEnd`);
process.exit(1);
},
}),
});
let text = await getSelectedText();
await chat.call([new SystemChatMessage(prompt), new HumanChatMessage(text)]);