Skip to content
This repository was archived by the owner on Mar 11, 2026. It is now read-only.

Commit b4b1ddb

Browse files
committed
Add code
0 parents  commit b4b1ddb

File tree

8 files changed

+343
-0
lines changed

8 files changed

+343
-0
lines changed

.env.example

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
ARCADE_API_KEY=
2+
ARCADE_USER_ID=
3+
ANTHROPIC_API_KEY=

README.md

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
# Reply gAI
2+
3+
## 🚀 Quickstart with LangGraph server
4+
5+
Sign up for an [Arcade API](https://docs.arcade-ai.com/integrations/toolkits/x) to get access to Twitter data.
6+
7+
Install the langgraph CLI:
8+
```
9+
pip install -U "langgraph-cli[inmem]"
10+
```
11+
12+
Install dependencies:
13+
```
14+
pip install -e .
15+
```
16+
17+
Load API keys into the environment for the Anthropic API and the Arcade API:
18+
```
19+
export ANTHROPIC_API_KEY=<your_anthropic_api_key>
20+
export ARCADE_API_KEY=<your_arcade_api_key>
21+
export ARCADE_USER_ID=<your_arcade_user_id>
22+
```
23+
24+
Launch the agent:
25+
```
26+
langgraph dev
27+
```
28+
29+
If all is well, you should see the following output:
30+
31+
> Ready!
32+
33+
> API: http://127.0.0.1:2024
34+
35+
> Docs: http://127.0.0.1:2024/docs
36+
37+
> LangGraph Studio Web UI: https://smith.langchain.com/studio/?baseUrl=http://127.0.0.1:2024
38+
39+
## How it works
40+
41+
Reply gAI uses LangGraph to create a workflow that mimics a Twitter user's writing style. Here's how the system operates:
42+
43+
1. **Tweet Collection**
44+
- Uses the [Arcade API X Toolkit](https://docs.arcade-ai.com/integrations/toolkits/x) to fetch up to 100 recent tweets from a specified Twitter user
45+
- Tweets are stored locally with their text content and URLs
46+
- The system automatically refreshes tweets if they're older than the configured age limit
47+
48+
2. **Conversation Flow**
49+
- The workflow is managed by a state graph with two main nodes:
50+
- `get_tweets`: Fetches and stores recent tweets
51+
- `chat`: Generates responses using Claude 3.5 Sonnet
52+
53+
3. **Response Generation**
54+
- Claude analyzes the collected tweets to understand the user's writing style
55+
- Generates contextually appropriate responses that match the personality and tone of the target Twitter user
56+
- Uses a temperature of 0.75 to balance creativity with consistency
57+
58+
4. **Architecture**
59+
- Built on LangGraph for workflow management
60+
- Uses Anthropic's Claude 3.5 Sonnet for response generation
61+
- Integrates with Arcade API for Twitter data access
62+
- Maintains conversation state and tweet storage for efficient operation
63+
64+
The system automatically determines whether to fetch new tweets or use existing ones based on their age, ensuring responses are generated using recent and relevant data.

langgraph.json

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
{
2+
"dockerfile_lines": [],
3+
"graphs": {
4+
"reply_gai": "./src/agent/graph.py:graph"
5+
},
6+
"python_version": "3.11",
7+
"env": "./.env",
8+
"dependencies": [
9+
"."
10+
]
11+
}

pyproject.toml

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
[project]
2+
name = "reply-gAI"
3+
version = "0.0.1"
4+
description = "Chat persona based on a Twitter user."
5+
authors = [
6+
{ name = "Lance Martin" }
7+
]
8+
readme = "README.md"
9+
license = { text = "MIT" }
10+
requires-python = ">=3.11"
11+
dependencies = [
12+
"langgraph>=0.2.55",
13+
"langchain-community>=0.3.9",
14+
"langchain-anthropic>=0.3.0",
15+
"arcade_x>=0.1.5",
16+
]
17+
18+
[project.optional-dependencies]
19+
dev = ["mypy>=1.11.1", "ruff>=0.6.1"]
20+
21+
[build-system]
22+
requires = ["setuptools>=73.0.0", "wheel"]
23+
build-backend = "setuptools.build_meta"
24+
25+
[tool.setuptools]
26+
packages = ["agent"]
27+
28+
[tool.setuptools.package-dir]
29+
"agent" = "src/agent"
30+
31+
[tool.setuptools.package-data]
32+
"*" = ["py.typed"]
33+
34+
[tool.ruff]
35+
lint.select = [
36+
"E", # pycodestyle
37+
"F", # pyflakes
38+
"I", # isort
39+
"D", # pydocstyle
40+
"D401", # First line should be in imperative mood
41+
"T201",
42+
"UP",
43+
]
44+
lint.ignore = [
45+
"UP006",
46+
"UP007",
47+
"UP035",
48+
"D417",
49+
"E501",
50+
]
51+
52+
[tool.ruff.lint.per-file-ignores]
53+
"tests/*" = ["D", "UP"]
54+
55+
[tool.ruff.lint.pydocstyle]
56+
convention = "google"

src/agent/__init__.py

Whitespace-only changes.

src/agent/configuration.py

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
import os
2+
from dataclasses import dataclass, field, fields
3+
from typing import Any, Optional
4+
5+
from langchain_core.runnables import RunnableConfig
6+
from typing_extensions import Annotated
7+
from dataclasses import dataclass
8+
9+
@dataclass(kw_only=True)
10+
class Configuration:
11+
"""The configurable fields for the chatbot."""
12+
username: str = "elonmusk"
13+
update_tweet: bool = False
14+
max_tweet_age_seconds: int = 86400 # 24 hours (24 * 60 * 60 seconds)
15+
16+
@classmethod
17+
def from_runnable_config(
18+
cls, config: Optional[RunnableConfig] = None
19+
) -> "Configuration":
20+
"""Create a Configuration instance from a RunnableConfig."""
21+
configurable = (
22+
config["configurable"] if config and "configurable" in config else {}
23+
)
24+
values: dict[str, Any] = {
25+
f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
26+
for f in fields(cls)
27+
if f.init
28+
}
29+
return cls(**{k: v for k, v in values.items() if v})

src/agent/graph.py

Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,166 @@
1+
import uuid
2+
import os
3+
from datetime import datetime, timezone
4+
from langchain_core.runnables import RunnableConfig
5+
from langchain_core.messages import SystemMessage
6+
from langchain_anthropic import ChatAnthropic
7+
from langgraph.graph import MessagesState
8+
from langgraph.store.base import BaseStore
9+
from langgraph.graph import END, StateGraph
10+
from arcadepy import Arcade
11+
12+
import agent.configuration as configuration
13+
from agent.prompts import CHAT_INSTRUCTIONS
14+
15+
def get_tweets(state: MessagesState, config: RunnableConfig, store: BaseStore) -> dict:
    """Fetch and store recent tweets for a specified Twitter user.

    This function authenticates with the Arcade API, retrieves recent tweets for a given
    username, and stores them in the provided BaseStore instance. Each tweet is stored
    with its text content and URL.

    Args:
        state (MessagesState): Current conversation state (unused but required by graph)
        config (RunnableConfig): Configuration object containing settings like username
        store (BaseStore): Storage interface for saving retrieved tweets

    Returns:
        dict: Empty state update (tweets are persisted via the store, not returned)

    Note:
        - Requires ARCADE_USER_ID environment variable to be set
        - Fetches up to 100 most recent tweets
        - Stores tweets using (username, "tweets") as namespace
    """
    # Get the configuration
    configurable = configuration.Configuration.from_runnable_config(config)

    client = Arcade()
    USER_ID = os.environ["ARCADE_USER_ID"]
    TOOL_NAME = "X.SearchRecentTweetsByUsername"

    auth_response = client.tools.authorize(
        tool_name=TOOL_NAME,
        user_id=USER_ID,
    )

    if auth_response.status != "completed":
        print(f"Click this link to authorize: {auth_response.authorization_url}")

        # Wait for the authorization to complete
        client.auth.wait_for_completion(auth_response)

    # Search for recent tweets (last 7 days) on X (Twitter)
    username = configurable.username
    # TODO: Check with Arcade about max_results
    inputs = {"username": username, "max_results": 100}
    response = client.tools.execute(
        tool_name=TOOL_NAME,
        inputs=inputs,
        user_id=USER_ID,
    )

    # Format tweets into a string
    tweets = response.output.value['data']

    # Persist each tweet, keyed by its id; store keys must be strings, so the
    # fallback UUID is stringified (the original passed a raw uuid.UUID).
    namespace_for_memory = (username, "tweets")
    for tweet in tweets:
        memory_id = tweet.get('id', str(uuid.uuid4()))
        text = tweet.get('text', "Tweet empty")
        url = tweet.get('tweet_url', "URL not found")
        store.put(namespace_for_memory, memory_id, {"text": text, "url": url})

    # Honor the `-> dict` annotation: return an explicit (empty) state update
    # instead of implicitly returning None.
    return {}
74+
75+
def chat(state: MessagesState, config: RunnableConfig, store: BaseStore) -> dict:
    """Generate a chat response in the style of a specific Twitter user.

    Pulls every stored tweet for the configured username out of the store,
    renders them into a style-reference block, and asks Claude to answer the
    conversation in that user's voice.

    Args:
        state (MessagesState): Current conversation state containing message history
        config (RunnableConfig): Configuration object containing settings like username
        store (BaseStore): Storage interface for accessing saved tweets

    Returns:
        dict: Contains the generated message in the 'messages' key
    """
    cfg = configuration.Configuration.from_runnable_config(config)
    handle = cfg.username

    # Page through all stored tweets for this user, 200 at a time, using the
    # running count as the offset for the next page.
    namespace = (handle, "tweets")
    stored = []
    while True:
        page = store.search(namespace, limit=200, offset=len(stored))
        if not page:
            break
        stored.extend(page)

    # Render each tweet followed by a horizontal-rule separator.
    divider = "-" * 80 + "\n"
    tweet_context = "".join(
        f"@{handle}: {item.value['text']}\n" + divider for item in stored
    )

    # Have Claude reply in the user's voice, grounded in the collected tweets.
    model = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0.75)
    system_prompt = CHAT_INSTRUCTIONS.format(username=handle, tweets=tweet_context)
    reply = model.invoke([SystemMessage(content=system_prompt)] + state['messages'])
    return {"messages": [reply]}
115+
116+
def route_to_tweet_loader(state: MessagesState, config: RunnableConfig, store: BaseStore) -> str:
    """Route the workflow based on tweet availability and age.

    This function determines whether to fetch new tweets or proceed to chat by checking:
    1. If tweets exist for the user in the store
    2. If existing tweets are too old (beyond max_tweet_age_seconds)

    Args:
        state (MessagesState): Current conversation state (unused but required by graph)
        config (RunnableConfig): Configuration containing username and tweet age settings
        store (BaseStore): Storage interface for accessing saved tweets

    Returns:
        str: Either "get_tweets" to fetch new tweets or "chat" to proceed with conversation

    Note:
        The return annotation was corrected from `dict` to `str` — this node
        returns a routing label, as the docstring always stated.
    """
    # Get the configuration
    configurable = configuration.Configuration.from_runnable_config(config)
    username = configurable.username
    # NOTE(review): configurable.update_tweet is never consulted here —
    # confirm whether it should force a "get_tweets" route when True.

    namespace_for_memory = (username, "tweets")
    memories = store.search(namespace_for_memory, limit=200)

    # No tweets stored for this user yet: fetch them first.
    if not memories:
        return "get_tweets"

    # Timestamp of the most recently stored tweet record.
    most_recent = max(mem.created_at for mem in memories)

    # Refresh when the newest stored tweet exceeds the configured age limit.
    now = datetime.now(timezone.utc)
    time_delta = now - most_recent
    if time_delta.total_seconds() > configurable.max_tweet_age_seconds:
        return "get_tweets"
    return "chat"
156+
157+
# Assemble the workflow: the conditional entry point sends traffic to the
# tweet loader when stored tweets are missing or stale, otherwise straight
# to chat; the loader always hands off to chat afterwards.
workflow = StateGraph(MessagesState, config_schema=configuration.Configuration)
workflow.add_node("get_tweets", get_tweets)
workflow.add_node("chat", chat)
workflow.set_conditional_entry_point(route_to_tweet_loader, ["chat", "get_tweets"])
workflow.add_edge("get_tweets", "chat")
workflow.add_edge("chat", END)

# Compiled graph exported for the LangGraph server (see langgraph.json).
graph = workflow.compile()

src/agent/prompts.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
CHAT_INSTRUCTIONS = """Hey! You're chatting as @{username} on Twitter. I've pulled some of their recent tweets to help you get their vibe:
2+
3+
{tweets}
4+
5+
Quick tips for keeping it real:
6+
- Jump right into answers naturally, no need for formal intros
7+
- Chat like you're talking to a friend - keep it casual and fun
8+
- Use their usual phrases and way of talking
9+
- Share opinions and hot takes in their style
10+
- If it's a new topic, just flow with it naturally using their voice
11+
12+
Just vibe like @{username} would - keep it real and conversational!
13+
14+
Here's what they're asking:"""

0 commit comments

Comments
 (0)