
Commit

Merge pull request #37 from briansunter/bs/gpt-page
Bs/gpt page
briansunter authored Nov 1, 2022
2 parents 842db02 + 4e06381 commit 370190c
Showing 6 changed files with 230 additions and 100 deletions.
35 changes: 25 additions & 10 deletions README.md
@@ -21,15 +21,27 @@
<br>
</p>

## Demo

![logseq gpt3 openai demo](docs/demo.gif)

## Usage
### `gpt-block`

Just type `/gpt3` in a block or select `gpt3` from the block menu.
Type `/gpt-block` in a block or select `gpt-block` from the block menu.

[See here for example usages](https://beta.openai.com/examples).
`gpt-block` will send the block to OpenAI's GPT-3 API and append the response underneath the block.

## Demo
![logseq gpt-block](docs/gpt-block.gif)

![logseq gpt3 openai demo](docs/demo.gif)
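Under the hood, a command like this can be registered through the `@logseq/libs` editor API. The snippet below is a minimal sketch rather than the plugin's actual implementation: it assumes a placeholder API key and reuses the `openAI` helper from `src/lib/openai.ts`, with `logseq.Editor.registerSlashCommand` and `insertBlock` doing the wiring.

```ts
import "@logseq/libs";
import { openAI } from "./lib/openai";

// Minimal sketch of a gpt-block style command (illustrative only).
logseq.Editor.registerSlashCommand("gpt-block", async ({ uuid }) => {
  const block = await logseq.Editor.getBlock(uuid);
  if (!block || block.content.trim().length === 0) {
    return;
  }
  // Send the block's text to the completion API.
  const result = await openAI(block.content, {
    apiKey: "<your-openai-api-key>", // placeholder: supply your own key
  });
  // Append the response underneath the block.
  if (result) {
    await logseq.Editor.insertBlock(uuid, result, { sibling: false });
  }
});
```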
### `gpt-page`
Type `/gpt-page` in a block or select `gpt-page` from the block menu.

`gpt-page` will send the entire page to OpenAI's GPT-3 API and append the response to the bottom of the page.

![logseq gpt-page](docs/gpt-page.gif)
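A similar sketch for the page-level command, assuming the `getPageContentFromBlock` helper added in this PR (`src/lib/logseq.ts`) and `logseq.Editor.appendBlockInPage` for the final insert; option handling and error reporting are simplified.

```ts
import "@logseq/libs";
import { openAI } from "./lib/openai";
import { getPageContentFromBlock } from "./lib/logseq";

// Minimal sketch of a gpt-page style command (illustrative only).
logseq.Editor.registerSlashCommand("gpt-page", async ({ uuid }) => {
  const block = await logseq.Editor.getBlock(uuid);
  if (!block) {
    return;
  }
  // Gather the text of every block on the current page.
  const pageContent = await getPageContentFromBlock(block);
  const page = await logseq.Editor.getPage(block.page.id);
  const result = await openAI(pageContent, {
    apiKey: "<your-openai-api-key>", // placeholder: supply your own key
  });
  // Append the completion as a new block at the bottom of the page.
  if (result && page) {
    await logseq.Editor.appendBlockInPage(page.name, result);
  }
});
```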
### OpenAI Examples
[See here for example usages](https://beta.openai.com/examples).

## 📝 Table of Contents

@@ -48,7 +60,7 @@ Just type `/gpt3` in a block or select `gpt3` from the block menu.

`logseq-plugin-gpt3-openai` allows users to generate human-like text using GPT-3 within the LogSeq editor.

Write a GPT-3 command in a block, then run the open `gpt3` command via the slash or block menu. The plugin will generate a GPT-3 response using the OpenAI API and insert it beneath the block.
Write a GPT-3 command in a block, then run the `/gpt` command via the slash or block menu. The plugin will generate a GPT-3 response using the OpenAI API and insert it below the block. Leading and trailing whitespace is removed from the prompt before it is sent.
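As a rough illustration of that flow (not the plugin's exact code), the prompt is trimmed before it is handed to the `openAI` helper from `src/lib/openai.ts`:

```ts
import { openAI } from "./lib/openai";

// Illustrative only: trim the block's text, request a completion, and
// fall back to an empty string when the API returns no usable choice.
async function runGptCommand(blockContent: string, apiKey: string): Promise<string> {
  const prompt = blockContent.trim(); // leading/trailing whitespace removed
  const completion = await openAI(prompt, { apiKey });
  return completion ?? "";
}
```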

## Getting Started <a name = "getting_started"></a>

@@ -58,6 +70,9 @@

- Download the plugin in the Logseq marketplace by searching for `gpt3` or `openai`.

## ⚠️ Warning ⚠️

GPT-3 has limitations. It sometimes produces output that is subtly wrong or misleading. Don't rely on its output without verifying it yourself. Use it with caution.
## Example Use Cases <a name = "examples"></a>

## Summarizing or explaining a block of text
@@ -80,6 +95,11 @@ Write a GPT-3 command in a block, then run the open `gpt3` command via the slash

![logseq gpt3 openai table](docs/table.gif)

### Just for fun


![](docs/weirdpizza.gif)

- Generate code to do a given task
- Correct grammar
- Translate into other languages
@@ -91,11 +111,6 @@ Write a GPT-3 command in a block, then run the open `gpt3` command via the slash
- `Write a tagline for an ice cream shop.`
- Answer Questions
- `Q: How does a telescope work?`
## Just for fun

### Crazy recipes

![](docs/weirdpizza.gif)]
## FAQ <a name = "faq"></a>
### What is GPT-3 and OpenAI, and how does it work?
See [this article for a good overview.](https://www.vox.com/future-perfect/21355768/gpt-3-ai-openai-turing-test-language)
Binary file added docs/gpt-block.gif
Binary file added docs/gpt-page.gif
56 changes: 56 additions & 0 deletions src/lib/logseq.ts
@@ -0,0 +1,56 @@
import { BlockEntity, BlockUUIDTuple } from "@logseq/libs/dist/LSPlugin.user";

// Type guard: a full BlockEntity carries a `uuid`, unlike a BlockUUIDTuple reference.
function isBlockEntity(b: BlockEntity | BlockUUIDTuple): b is BlockEntity {
return (b as BlockEntity).uuid !== undefined;
}

// Recursively concatenates the trimmed content of a block and all of its descendants.
async function getTreeContent(b: BlockEntity) {
let content = "";
const trimmedBlockContent = b.content.trim();
if (trimmedBlockContent.length > 0) {
content += trimmedBlockContent;
}

if (!b.children) {
return content;
}

for (const child of b.children) {
if (isBlockEntity(child)) {
content += await getTreeContent(child);
} else {
const childBlock = await logseq.Editor.getBlock(child[1], {
includeChildren: true,
});
if (childBlock) {
content += await getTreeContent(childBlock);
}
}
}
return content;
}

// Collects the content of every block tree on the page containing `b`, joined with spaces.
async function getPageContentFromBlock(b: BlockEntity): Promise<string> {
const blockContents: string[] = [];

const currentBlock = await logseq.Editor.getBlock(b);
if (!currentBlock) {
throw new Error("Block not found");
}

const page = await logseq.Editor.getPage(currentBlock.page.id);
if (!page) {
throw new Error("Page not found");
}

const pageBlocks = await logseq.Editor.getPageBlocksTree(page.name);
for (const pageBlock of pageBlocks) {
const blockContent = await getTreeContent(pageBlock);
if (blockContent.length > 0) {
blockContents.push(blockContent);
}
}
return blockContents.join(" ");
}

export { getPageContentFromBlock };
46 changes: 34 additions & 12 deletions src/lib/openai.ts
@@ -1,7 +1,7 @@
import { Configuration, OpenAIApi } from "openai";
import "@logseq/libs";

interface OpenAIOptions {
import { backOff } from "exponential-backoff";
export interface OpenAIOptions {
apiKey: string;
completionEngine?: string;
temperature?: number;
@@ -10,11 +10,29 @@ interface OpenAIOptions {

const OpenAIDefaults = (apiKey: string): OpenAIOptions => ({
apiKey,
completionEngine: 'text-davinci-002',
completionEngine: "text-davinci-002",
temperature: 1.0,
maxTokens: 1000,
});

// Retry up to three times on HTTP 429 rate-limit errors, but give up immediately
// when the quota is exhausted or on any other kind of failure.
const retryOptions = {
numOfAttempts: 3,
retry: (err: any) => {
if (!err.response || !err.response.data || !err.response.data.error) {
return false;
}
if (err.response.status === 429) {
const errorType = err.response.data.error.type;
if (errorType === "insufficient_quota") {
return false;
}
console.warn("Rate limit exceeded. Retrying...");
return true;
}
return false;
},
};

export async function openAI(
input: string,
openAiOptions: OpenAIOptions
@@ -28,17 +46,21 @@ export async function openAI(

const openai = new OpenAIApi(configuration);

const response = await openai.createCompletion(engine, {
prompt: input,
temperature: options.temperature,
max_tokens: options.maxTokens,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
});
const response = await backOff(
() =>
openai.createCompletion(engine, {
prompt: input,
temperature: options.temperature,
max_tokens: options.maxTokens,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
}),
retryOptions
);

const choices = response.data.choices;
if (choices && choices[0] && choices[0].text) {
if (choices && choices[0] && choices[0].text && choices[0].text.length > 0) {
return choices[0].text;
} else {
return null;
