import random
import re
from typing import Optional

import openai

from edmond.plugin import Plugin
from edmond.utils import limit_text_length


class Gpt3Plugin(Plugin):

    REQUIRED_CONFIGS = ["openai_key", "computing_replies", "join_lines"]

    def __init__(self, bot):
        super().__init__(bot)
        openai.api_key = self.config["openai_key"]

    def reply(self, prompt: str, target: str):
        """Acknowledge the prompt, then post the sanitized completion to target."""
        computing_reply = random.choice(self.config["computing_replies"])
        self.bot.say(target, computing_reply)

        completion = self.complete(prompt)
        if completion and (reply := self.sanitize(completion)):
            self.bot.say(target, reply)
        else:
            self.bot.signal_failure(target)

    def complete(self, prompt: str) -> Optional[str]:
        """Request a completion from the OpenAI API; return None on any API error."""
        try:
            completion = openai.Completion.create(
                model="text-davinci-002",
                prompt=prompt,
                temperature=0.7,
                max_tokens=128,
                top_p=1,
                frequency_penalty=0.5,
                presence_penalty=0
            )
        except openai.error.OpenAIError:
            return None
        return completion.choices[0].text

    def sanitize(self, text: str) -> str:
        """Strip whitespace, join lines with the configured separator, and clamp length."""
        text = text.strip()
        text = re.sub(r"\n+", self.config["join_lines"], text)
        text = limit_text_length(text)
        return text