郑毅 3 months ago
parent
commit
45635d0ffd
6 changed files with 54 additions and 10 deletions
  1. __main__.py (+4 -10)
  2. kimi/__init__.py (+0 -0)
  3. kimi/chat.py (+42 -0)
  4. kimi/file.py (+0 -0)
  5. str/__init__.py (+0 -0)
  6. str/split.py (+8 -0)

+ 4 - 10
__main__.py

@@ -1,15 +1,9 @@
-from airport_codes.get_info import *
+from kimi.chat import *
 
 def main():
-    # print('[{}] Hello world!'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
-    # str = '188888'
-    # n = len(str)
-    # new_str = '****' if len(str) <= 4 else str[0:int(n/3)] + '****' + str[int(n*2/3):]
-    # print(new_str)
-
-    # clean_geocode()
-    # req_geocode()
-    test_geocode()
+    c = Chat()
+    print(c.chat('东京奥运会中国得了多少金牌?'))  # "How many gold medals did China win at the Tokyo Olympics?"
+    print(c.chat('巴黎呢?'))                        # "And Paris?" (a follow-up that relies on the stored history)
 
 
 # Program entry point
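
The two prompts in main() exercise multi-turn context: the second question only makes sense because Chat keeps the first exchange in its history. A rough illustration of what the message list looks like just before the second request is sent (the assistant reply shown here is a placeholder, not real output):

# Illustrative only: the shape of Chat.history before the second request.
history_before_second_turn = [
    {"role": "system", "content": "...persona prompt from kimi/chat.py..."},
    {"role": "user", "content": "东京奥运会中国得了多少金牌?"},
    {"role": "assistant", "content": "...the model's answer about the Tokyo Olympics..."},
    {"role": "user", "content": "巴黎呢?"},
]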

+ 0 - 0
kimi/__init__.py


+ 42 - 0
kimi/chat.py

@@ -0,0 +1,42 @@
+from openai import OpenAI
+ 
+client = OpenAI(
+    api_key = "sk-g4Vp3q4nSGPGNalQ04sbkiSeJX95PAZeYMv8ENB7mVq2Iu8j",  # NOTE: a key committed to source control should be rotated; prefer os.environ.get("MOONSHOT_API_KEY")
+    base_url = "https://api.moonshot.cn/v1",
+)
+
+# Default persona context: the system prompt tells the model it is "Coo", an assistant provided by 汇智, better at Chinese and English, that it must answer safely and accurately, refuse harmful requests, answer from retrieved context, and say so when it does not know.
+history = [
+    {"role": "system", "content": "你是 Coo,由汇智提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,黄色暴力等问题的回答。使用检索到的上下文片段来回答问题。如果你不知道答案,就说你不知道。"}
+]
+
+class Chat:
+    history = []        # conversation history; set per instance in __init__
+    save_first_len = 1  # leading messages to keep (e.g. the system prompt); negative values are treated as 0
+    save_last_len = 10  # most recent messages to keep; <= 0 means no limit
+
+    def __init__(self, history=history, save_first_len=1, save_last_len=10) -> None:
+        self.history = list(history)  # copy, so separate Chat instances do not share one mutable list
+        self.save_first_len = save_first_len if save_first_len >= 0 else 0
+        self.save_last_len = save_last_len if save_last_len >= 0 else 0
+ 
+    def chat(self, query):
+        self.history.append({
+            "role": "user", 
+            "content": query
+        })
+        completion = client.chat.completions.create(
+            model="moonshot-v1-8k",
+            messages=self.history,
+            temperature=0.3,
+        )
+        result = completion.choices[0].message.content
+        self.history.append({
+            "role": "assistant",
+            "content": result
+        })
+        # Each request resends the whole history, so the token cost grows with every turn; keep only the first save_first_len and the last save_last_len messages.
+        if self.save_last_len > 0 and len(self.history) > self.save_first_len + self.save_last_len:
+            self.history = self.history[0:self.save_first_len] + self.history[len(self.history) - self.save_last_len:]
+        
+        return result
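
The windowing above is the part worth sanity-checking: the system prompt survives at the front while older turns in the middle drop out. A minimal standalone sketch of the same policy, with illustrative names (trim_history, keep_first, keep_last are not part of the module):

def trim_history(history, keep_first=1, keep_last=10):
    # Keep the first keep_first messages (e.g. the system prompt) and the
    # keep_last most recent ones; drop everything in between.
    if keep_last <= 0 or len(history) <= keep_first + keep_last:
        return history
    return history[:keep_first] + history[len(history) - keep_last:]

msgs = [{"role": "system", "content": "persona"},
        {"role": "user", "content": "q1"},
        {"role": "assistant", "content": "a1"},
        {"role": "user", "content": "q2"},
        {"role": "assistant", "content": "a2"}]
print(trim_history(msgs, keep_first=1, keep_last=2))
# -> the system message plus the last user/assistant pair (q2, a2)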

+ 0 - 0
kimi/file.py


+ 0 - 0
str/__init__.py


+ 8 - 0
str/split.py

@@ -0,0 +1,8 @@
+import time
+
+
+print('[{}] Hello world!'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
+s = '188888'              # renamed from `str`, which shadows the built-in type
+n = len(s)
+new_str = '****' if n <= 4 else s[0:n // 3] + '****' + s[n * 2 // 3:]  # mask the middle third
+print(new_str)            # -> 18****88
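
The snippet masks the middle third of a string, keeping roughly one third at each end; strings of four characters or fewer are masked entirely. A reusable version of the same logic (mask_middle is an illustrative name, not part of this file):

def mask_middle(s: str) -> str:
    # Replace the middle third with '****'; short strings are fully masked.
    n = len(s)
    if n <= 4:
        return '****'
    return s[:n // 3] + '****' + s[n * 2 // 3:]

print(mask_middle('188888'))       # -> 18****88
print(mask_middle('13912345678'))  # -> 139****5678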