|
@@ -0,0 +1,42 @@
|
|
|
|
import os

from openai import OpenAI
|
|
|
|
+
|
|
|
|
+client = OpenAI(
|
|
|
|
+ api_key = "sk-g4Vp3q4nSGPGNalQ04sbkiSeJX95PAZeYMv8ENB7mVq2Iu8j",
|
|
|
|
+ base_url = "https://api.moonshot.cn/v1",
|
|
|
|
+)
|
|
|
|
+
|
|
|
|
+# 默认人设上下文
|
|
|
|
+history = [
|
|
|
|
+ {"role": "system", "content": "你是 Coo,由汇智提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,黄色暴力等问题的回答。使用检索到的上下文片段来回答问题。如果你不知道答案,就说你不知道。"}
|
|
|
|
+]
|
|
|
|
+
|
|
|
|
+class Chat:
|
|
|
|
+ history = [] # 对话历史上下文
|
|
|
|
+ save_first_len = 1 # 保留起始的几条上下文,小于等于 0 设置无效
|
|
|
|
+ save_last_len = 10 # 保留末尾的几条上下文,小于等于 0 为不限制
|
|
|
|
+
|
|
|
|
+ def __init__(self, history=history, save_first_len=1, save_last_len=10) -> None:
|
|
|
|
+ self.history = history
|
|
|
|
+ self.save_first_len = save_first_len if save_first_len >= 0 else 0
|
|
|
|
+ self.save_last_len = save_last_len if save_last_len >= 0 else 0
|
|
|
|
+
|
|
|
|
+ def chat(self, query):
|
|
|
|
+ self.history.append({
|
|
|
|
+ "role": "user",
|
|
|
|
+ "content": query
|
|
|
|
+ })
|
|
|
|
+ completion = client.chat.completions.create(
|
|
|
|
+ model="moonshot-v1-8k",
|
|
|
|
+ messages=self.history,
|
|
|
|
+ temperature=0.3,
|
|
|
|
+ )
|
|
|
|
+ result = completion.choices[0].message.content
|
|
|
|
+ self.history.append({
|
|
|
|
+ "role": "assistant",
|
|
|
|
+ "content": result
|
|
|
|
+ })
|
|
|
|
+ # 随着对话的进行,模型每次需要传入的 token 都会线性增加,只保留特定几轮对话
|
|
|
|
+ if self.save_first_len + self.save_last_len > len(self.history):
|
|
|
|
+ self.history = self.history[0:self.save_first_len] + self.history[len(self.history) - self.save_last_len:]
|
|
|
|
+
|
|
|
|
+ return result
|