Skip to content

Commit f4b91c5

Browse files
authored
feat(core): Support system code for feedback and prompt (#873)
1 parent 711032c commit f4b91c5

File tree

7 files changed

+71
-26
lines changed

7 files changed

+71
-26
lines changed

README.zh.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ DB-GPT是一个开源的数据库领域大模型框架。目的是构建大模
127127
- [internlm-chat-20b](https://huggingface.co/internlm/internlm-chat-20b)
128128
- [qwen-7b-chat](https://huggingface.co/Qwen/Qwen-7B-Chat)
129129
- [qwen-14b-chat](https://huggingface.co/Qwen/Qwen-14B-Chat)
130+
- [qwen-72b-chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
130131
- [wizardlm-13b](https://huggingface.co/WizardLM/WizardLM-13B-V1.2)
131132
- [orca-2-7b](https://huggingface.co/microsoft/Orca-2-7b)
132133
- [orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b)

pilot/configs/model_config.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,18 @@ def get_device() -> str:
8888
"qwen-14b-chat-int8": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int8"),
8989
# https://huggingface.co/Qwen/Qwen-14B-Chat-Int4
9090
"qwen-14b-chat-int4": os.path.join(MODEL_PATH, "Qwen-14B-Chat-Int4"),
91+
# https://huggingface.co/Qwen/Qwen-72B-Chat
92+
"qwen-72b-chat": os.path.join(MODEL_PATH, "Qwen-72B-Chat"),
93+
# https://huggingface.co/Qwen/Qwen-72B-Chat-Int8
94+
"qwen-72b-chat-int8": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int8"),
95+
# https://huggingface.co/Qwen/Qwen-72B-Chat-Int4
96+
"qwen-72b-chat-int4": os.path.join(MODEL_PATH, "Qwen-72B-Chat-Int4"),
97+
# https://huggingface.co/Qwen/Qwen-1_8B-Chat
98+
"qwen-1.8b-chat": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat"),
99+
# https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int8
100+
"qwen-1.8b-chat-int8": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int8"),
101+
# https://huggingface.co/Qwen/Qwen-1_8B-Chat-Int4
102+
"qwen-1.8b-chat-int4": os.path.join(MODEL_PATH, "Qwen-1_8B-Chat-Int4"),
91103
# (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
92104
"wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
93105
# wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf

pilot/openapi/api_v1/feedback/feed_back_db.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,6 @@ def __init__(self):
4949

5050
def create_or_update_chat_feed_back(self, feed_back: FeedBackBody):
5151
# Todo: We need to have user information first.
52-
def_user_name = ""
5352

5453
session = self.get_session()
5554
chat_feed_back = ChatFeedBackEntity(
@@ -60,7 +59,7 @@ def create_or_update_chat_feed_back(self, feed_back: FeedBackBody):
6059
question=feed_back.question,
6160
knowledge_space=feed_back.knowledge_space,
6261
messages=feed_back.messages,
63-
user_name=def_user_name,
62+
user_name=feed_back.user_name,
6463
gmt_created=datetime.now(),
6564
gmt_modified=datetime.now(),
6665
)
@@ -76,7 +75,7 @@ def create_or_update_chat_feed_back(self, feed_back: FeedBackBody):
7675
result.question = feed_back.question
7776
result.knowledge_space = feed_back.knowledge_space
7877
result.messages = feed_back.messages
79-
result.user_name = def_user_name
78+
result.user_name = feed_back.user_name
8079
result.gmt_created = datetime.now()
8180
result.gmt_modified = datetime.now()
8281
else:

pilot/openapi/api_v1/feedback/feed_back_model.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from pydantic.main import BaseModel
2+
from typing import Optional
23

34

45
class FeedBackBody(BaseModel):
@@ -12,14 +13,16 @@ class FeedBackBody(BaseModel):
1213
"""question: human question"""
1314
question: str
1415

15-
"""knowledge_space: knowledge space"""
16-
knowledge_space: str
17-
1816
"""score: rating of the llm's answer"""
1917
score: int
2018

2119
"""ques_type: question type"""
2220
ques_type: str
2321

22+
user_name: Optional[str] = None
23+
2424
"""messages: rating detail"""
25-
messages: str
25+
messages: Optional[str] = None
26+
27+
"""knowledge_space: knowledge space"""
28+
knowledge_space: Optional[str] = None

pilot/server/prompt/prompt_manage_db.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ def create_prompt(self, prompt: PromptManageRequest):
5555
prompt_name=prompt.prompt_name,
5656
content=prompt.content,
5757
user_name=prompt.user_name,
58+
sys_code=prompt.sys_code,
5859
gmt_created=datetime.now(),
5960
gmt_modified=datetime.now(),
6061
)
@@ -83,6 +84,8 @@ def get_prompts(self, query: PromptManageEntity):
8384
prompts = prompts.filter(
8485
PromptManageEntity.prompt_name == query.prompt_name
8586
)
87+
if query.sys_code is not None:
88+
prompts = prompts.filter(PromptManageEntity.sys_code == query.sys_code)
8689

8790
prompts = prompts.order_by(PromptManageEntity.gmt_created.desc())
8891
result = prompts.all()
Lines changed: 38 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,44 @@
11
from typing import List
22

33
from pydantic import BaseModel
4+
from typing import Optional
5+
from pydantic import BaseModel
46

57

68
class PromptManageRequest(BaseModel):
7-
"""chat_scene: for example: chat_with_db_execute, chat_excel, chat_with_db_qa"""
8-
9-
chat_scene: str = None
10-
11-
"""sub_chat_scene: sub chat scene"""
12-
sub_chat_scene: str = None
13-
14-
"""prompt_type: common or private"""
15-
prompt_type: str = None
16-
17-
"""content: prompt content"""
18-
content: str = None
19-
20-
"""user_name: user name"""
21-
user_name: str = None
22-
23-
"""prompt_name: prompt name"""
24-
prompt_name: str = None
9+
"""Model for managing prompts."""
10+
11+
chat_scene: Optional[str] = None
12+
"""
13+
The chat scene, e.g. chat_with_db_execute, chat_excel, chat_with_db_qa.
14+
"""
15+
16+
sub_chat_scene: Optional[str] = None
17+
"""
18+
The sub chat scene.
19+
"""
20+
21+
prompt_type: Optional[str] = None
22+
"""
23+
The prompt type, either common or private.
24+
"""
25+
26+
content: Optional[str] = None
27+
"""
28+
The prompt content.
29+
"""
30+
31+
user_name: Optional[str] = None
32+
"""
33+
The user name.
34+
"""
35+
36+
sys_code: Optional[str] = None
37+
"""
38+
System code
39+
"""
40+
41+
prompt_name: Optional[str] = None
42+
"""
43+
The prompt name.
44+
"""

pilot/server/prompt/service.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,15 @@ def create_prompt(self, request: PromptManageRequest):
1717
query = PromptManageRequest(
1818
prompt_name=request.prompt_name,
1919
)
20+
err_sys_str = ""
21+
if request.sys_code:
22+
query.sys_code = request.sys_code
23+
err_sys_str = f" and sys_code: {request.sys_code}"
2024
prompt_name = prompt_manage_dao.get_prompts(query)
2125
if len(prompt_name) > 0:
22-
raise Exception(f"prompt name:{request.prompt_name} have already named")
26+
raise Exception(
27+
f"prompt name: {request.prompt_name}{err_sys_str} already exists"
28+
)
2329
prompt_manage_dao.create_prompt(request)
2430
return True
2531

@@ -32,6 +38,7 @@ def get_prompts(self, request: PromptManageRequest):
3238
prompt_type=request.prompt_type,
3339
prompt_name=request.prompt_name,
3440
user_name=request.user_name,
41+
sys_code=request.sys_code,
3542
)
3643
responses = []
3744
prompts = prompt_manage_dao.get_prompts(query)

0 commit comments

Comments
 (0)