2022-11-29 12:53:47 +01:00
|
|
|
import random
|
|
|
|
import re
|
|
|
|
from typing import Optional
|
|
|
|
|
|
|
|
import openai
|
|
|
|
|
|
|
|
from edmond.plugin import Plugin
|
|
|
|
from edmond.utils import limit_text_length
|
|
|
|
|
|
|
|
|
|
|
|
class Gpt3Plugin(Plugin):
    """Bot plugin that answers prompts through the OpenAI GPT-3 completion API.

    Requires three config keys: the API key, a list of canned "thinking…"
    replies, and the separator used to join multi-line completions.
    """

    REQUIRED_CONFIGS = ["openai_key", "computing_replies", "join_lines"]

    def __init__(self, bot):
        """Register the plugin and install the configured OpenAI API key."""
        super().__init__(bot)
        # The legacy openai (v0.x) client authenticates via this module-level attribute.
        openai.api_key = self.config["openai_key"]

    def reply(self, prompt: str, target: str):
        """Answer `prompt` on `target`, announcing first that work is in progress.

        Sends a random "computing" message immediately (the API call can take
        a while), then either the sanitized completion or a failure signal.
        """
        placeholder = random.choice(self.config["computing_replies"])
        self.bot.say(target, placeholder)

        completion = self.complete(prompt)
        if completion:
            cleaned = self.sanitize(completion)
            if cleaned:
                self.bot.say(target, cleaned)
                return
        # Reached when the API call failed or sanitizing left nothing to say.
        self.signal_failure(target)

    def complete(self, prompt: str) -> Optional[str]:
        """Request a GPT-3 completion for `prompt`.

        Returns the raw completion text, or None if the OpenAI call raised
        (the error is logged, never propagated to the caller).
        """
        try:
            response = openai.Completion.create(
                model="text-davinci-003",
                prompt=prompt,
                temperature=0.7,
                max_tokens=128,
                top_p=1,
                frequency_penalty=0.5,
                presence_penalty=0,
                # NOTE(review): both timeouts are passed; `request_timeout` is the
                # HTTP timeout in openai v0.x — confirm whether `timeout` is needed.
                timeout=30,
                request_timeout=30,
            )
        except openai.error.OpenAIError as exc:
            self.bot.log_e(f"OpenAI error: {exc}")
            return None
        return response.choices[0].text

    def sanitize(self, text: str) -> str:
        """Normalize completion text into a single length-limited message.

        Strips surrounding whitespace, collapses newline runs into the
        configured separator, and truncates to the bot's message limit.
        """
        stripped = text.strip()
        joined = re.sub(r"\n+", self.config["join_lines"], stripped)
        return limit_text_length(joined)
|