Skip to content

Commit 7a268dd

Browse files
committed
update code during demo
1 parent e070f8f commit 7a268dd

File tree

4 files changed

+79
-67
lines changed

4 files changed

+79
-67
lines changed

2_prompt_templates/1_prompt_template_basic.py

Lines changed: 36 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,44 +1,47 @@
1+
# Prompt Template Docs:
2+
# https://python.langchain.com/v0.2/docs/concepts/#prompt-templates
3+
14
from langchain.prompts import ChatPromptTemplate
25
from langchain_core.messages import HumanMessage
36

4-
# PART 1: Create a ChatPromptTemplate using a template string
5-
template = "Tell me a joke about {topic}."
6-
prompt_template = ChatPromptTemplate.from_template(template)
7+
# # PART 1: Create a ChatPromptTemplate using a template string
8+
# template = "Tell me a joke about {topic}."
9+
# prompt_template = ChatPromptTemplate.from_template(template)
710

8-
print("-----Prompt from Template-----")
9-
prompt = prompt_template.invoke({"topic": "cats"})
10-
print(prompt)
11+
# print("-----Prompt from Template-----")
12+
# prompt = prompt_template.invoke({"topic": "cats"})
13+
# print(prompt)
1114

12-
# PART 2: Prompt with Multiple Placeholders
13-
template_multiple = """You are a helpful assistant.
14-
Human: Tell me a {adjective} story about a {animal}.
15-
Assistant:"""
16-
prompt_multiple = ChatPromptTemplate.from_template(template_multiple)
17-
prompt = prompt_multiple.invoke({"adjective": "funny", "animal": "panda"})
18-
print("\n----- Prompt with Multiple Placeholders -----\n")
19-
print(prompt)
15+
# # PART 2: Prompt with Multiple Placeholders
16+
# template_multiple = """You are a helpful assistant.
17+
# Human: Tell me a {adjective} story about a {animal}.
18+
# Assistant:"""
19+
# prompt_multiple = ChatPromptTemplate.from_template(template_multiple)
20+
# prompt = prompt_multiple.invoke({"adjective": "funny", "animal": "panda"})
21+
# print("\n----- Prompt with Multiple Placeholders -----\n")
22+
# print(prompt)
2023

2124

2225
# PART 3: Prompt with System and Human Messages (Using Tuples)
23-
messages = [
24-
("system", "You are a comedian who tells jokes about {topic}."),
25-
("human", "Tell me {joke_count} jokes."),
26-
]
27-
prompt_template = ChatPromptTemplate.from_messages(messages)
28-
prompt = prompt_template.invoke({"topic": "lawyers", "joke_count": 3})
29-
print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
30-
print(prompt)
31-
32-
# Extra Information about Part 3.
33-
# This does work:
34-
messages = [
35-
("system", "You are a comedian who tells jokes about {topic}."),
36-
HumanMessage(content="Tell me 3 jokes."),
37-
]
38-
prompt_template = ChatPromptTemplate.from_messages(messages)
39-
prompt = prompt_template.invoke({"topic": "lawyers"})
40-
print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
41-
print(prompt)
26+
# messages = [
27+
# ("system", "You are a comedian who tells jokes about {topic}."),
28+
# ("human", "Tell me {joke_count} jokes."),
29+
# ]
30+
# prompt_template = ChatPromptTemplate.from_messages(messages)
31+
# prompt = prompt_template.invoke({"topic": "lawyers", "joke_count": 3})
32+
# print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
33+
# print(prompt)
34+
35+
# # Extra Information about Part 3.
36+
# # This does work:
37+
# messages = [
38+
# ("system", "You are a comedian who tells jokes about {topic}."),
39+
# HumanMessage(content="Tell me 3 jokes."),
40+
# ]
41+
# prompt_template = ChatPromptTemplate.from_messages(messages)
42+
# prompt = prompt_template.invoke({"topic": "lawyers"})
43+
# print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
44+
# print(prompt)
4245

4346

4447
# This does NOT work:

3_chains/1_chains_basics.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919

2020
# Create the combined chain using LangChain Expression Language (LCEL)
2121
chain = prompt_template | model | StrOutputParser()
22+
# chain = prompt_template | model
2223

2324
# Run the chain
2425
result = chain.invoke({"topic": "lawyers", "joke_count": 3})

3_chains/4_chains_parallel.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from dotenv import load_dotenv
22
from langchain.prompts import ChatPromptTemplate
33
from langchain.schema.output_parser import StrOutputParser
4-
from langchain.schema.runnable import RunnableBranch, RunnableLambda
4+
from langchain.schema.runnable import RunnableParallel, RunnableLambda
55
from langchain_openai import ChatOpenAI
66

77
# Load environment variables from .env
@@ -53,20 +53,21 @@ def combine_pros_cons(pros, cons):
5353

5454

5555
# Simplify branches with LCEL
56-
pros_branch = (
57-
RunnableLambda(lambda x: analyze_pros(x["features"])) | model | StrOutputParser()
56+
pros_branch_chain = (
57+
RunnableLambda(lambda x: analyze_pros(x)) | model | StrOutputParser()
5858
)
59-
cons_branch = (
60-
RunnableLambda(lambda x: analyze_cons(x["features"])) | model | StrOutputParser()
59+
60+
cons_branch_chain = (
61+
RunnableLambda(lambda x: analyze_cons(x)) | model | StrOutputParser()
6162
)
6263

6364
# Create the combined chain using LangChain Expression Language (LCEL)
6465
chain = (
6566
prompt_template
6667
| model
67-
| RunnableLambda(lambda x: {"features": x})
68-
| RunnableBranch(branches={"pros": pros_branch, "cons": cons_branch})
69-
| RunnableLambda(lambda x: combine_pros_cons(x["pros"], x["cons"]))
68+
| StrOutputParser()
69+
| RunnableParallel(branches={"pros": pros_branch_chain, "cons": cons_branch_chain})
70+
| RunnableLambda(lambda x: combine_pros_cons(x["branches"]["pros"], x["branches"]["cons"]))
7071
)
7172

7273
# Run the chain

3_chains/5_chains_branching.py

Lines changed: 33 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from dotenv import load_dotenv
22
from langchain.prompts import ChatPromptTemplate
33
from langchain.schema.output_parser import StrOutputParser
4-
from langchain.schema.runnable import RunnableableMap, RunnableBranch
4+
from langchain.schema.runnable import RunnableBranch
55
from langchain_openai import ChatOpenAI
66

77
# Load environment variables from .env
@@ -14,14 +14,16 @@
1414
positive_feedback_template = ChatPromptTemplate.from_messages(
1515
[
1616
("system", "You are a helpful assistant."),
17-
("human", "Generate a thank you note for this positive feedback: {feedback}."),
17+
("human",
18+
"Generate a thank you note for this positive feedback: {feedback}."),
1819
]
1920
)
2021

2122
negative_feedback_template = ChatPromptTemplate.from_messages(
2223
[
2324
("system", "You are a helpful assistant."),
24-
("human", "Generate a response addressing this negative feedback: {feedback}."),
25+
("human",
26+
"Generate a response addressing this negative feedback: {feedback}."),
2527
]
2628
)
2729

@@ -45,41 +47,46 @@
4547
]
4648
)
4749

48-
49-
# Define the branch conditions based on feedback sentiment
50-
def is_positive(feedback):
51-
return "good" in feedback.lower() or "excellent" in feedback.lower()
52-
53-
54-
def is_negative(feedback):
55-
return "bad" in feedback.lower() or "poor" in feedback.lower()
56-
57-
58-
def is_neutral(feedback):
59-
return "okay" in feedback.lower() or "neutral" in feedback.lower()
60-
50+
# Define the feedback classification template
51+
classification_template = ChatPromptTemplate.from_messages(
52+
[
53+
("system", "You are a helpful assistant."),
54+
("human",
55+
"Classify the sentiment of this feedback as positive, negative, neutral, or escalate: {feedback}."),
56+
]
57+
)
6158

6259
# Define the runnable branches for handling feedback
6360
branches = RunnableBranch(
64-
(lambda x: is_positive(x), positive_feedback_template | model | StrOutputParser()),
65-
(lambda x: is_negative(x), negative_feedback_template | model | StrOutputParser()),
66-
(lambda x: is_neutral(x), neutral_feedback_template | model | StrOutputParser()),
67-
escalate_feedback_template | model | StrOutputParser(),
61+
(
62+
lambda x: "positive" in x,
63+
positive_feedback_template | model | StrOutputParser() # Positive feedback chain
64+
),
65+
(
66+
lambda x: "negative" in x,
67+
negative_feedback_template | model | StrOutputParser() # Negative feedback chain
68+
),
69+
(
70+
lambda x: "neutral" in x,
71+
neutral_feedback_template | model | StrOutputParser() # Neutral feedback chain
72+
),
73+
escalate_feedback_template | model | StrOutputParser()
6874
)
6975

70-
# Create the combined chain using LangChain Expression Language (LCEL)
71-
chain = branches
76+
# Create the classification chain
77+
classification_chain = classification_template | model | StrOutputParser()
78+
79+
# Combine classification and response generation into one chain
80+
chain = classification_chain | branches
7281

7382
# Run the chain with an example review
7483
# Good review - "The product is excellent. I really enjoyed using it and found it very helpful."
7584
# Bad review - "The product is terrible. It broke after just one use and the quality is very poor."
7685
# Neutral review - "The product is okay. It works as expected but nothing exceptional."
7786
# Default - "I'm not sure about the product yet. Can you tell me more about its features and benefits?"
7887

79-
review = (
80-
"The product is terrible. It broke after just one use and the quality is very poor."
81-
)
82-
result = chain.invoke(review)
88+
review = "The product is terrible. It broke after just one use and the quality is very poor."
89+
result = chain.invoke({"feedback": review})
8390

8491
# Output the result
8592
print(result)

0 commit comments

Comments
 (0)