3 changes: 2 additions & 1 deletion DESCRIPTION
@@ -39,6 +39,7 @@ Imports:
     withr,
     yaml
 Remotes:
-    posit-dev/shinychat/pkg-r
+    tidyverse/ellmer@d26b150,
+    posit-dev/shinychat/pkg-r@cf9d098
 URL: https://simonpcouch.github.io/side/
 VignetteBuilder: knitr
8 changes: 8 additions & 0 deletions NAMESPACE
@@ -1,5 +1,13 @@
 # Generated by roxygen2: do not edit by hand
 
+S3method(supports_thinking,ProviderAnthropic)
+S3method(supports_thinking,ProviderGoogleGemini)
+S3method(supports_thinking,ProviderOpenAI)
+S3method(supports_thinking,default)
+S3method(toggle_thinking,ProviderAnthropic)
+S3method(toggle_thinking,ProviderGoogleGemini)
+S3method(toggle_thinking,ProviderOpenAI)
+S3method(toggle_thinking,default)
 export(kick)
 import(ellmer)
 import(rlang)
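
The NAMESPACE additions register two S3 generics, `supports_thinking()` and `toggle_thinking()`, with methods for the Anthropic, Google Gemini, and OpenAI providers plus a `default` fallback. The generic definitions and method bodies are not part of this diff; a minimal sketch of the dispatch pattern the registrations imply, with hypothetical method bodies based on the NEWS.md description, might look like:

```r
# Hypothetical sketch only: the real generics and methods live elsewhere in
# the package, and ellmer providers are S7 objects rather than plain lists.
supports_thinking <- function(provider) UseMethod("supports_thinking")
supports_thinking.default <- function(provider) FALSE
supports_thinking.ProviderAnthropic <- function(provider) TRUE

toggle_thinking <- function(provider, enable = TRUE) UseMethod("toggle_thinking")
toggle_thinking.default <- function(provider, enable = TRUE) {
  stop("Thinking mode is not supported for this provider.")
}
toggle_thinking.ProviderAnthropic <- function(provider, enable = TRUE) {
  # Per NEWS.md, Anthropic gets a 1024-token thinking budget when enabled.
  provider$thinking <- if (enable) {
    list(type = "enabled", budget_tokens = 1024)
  } else {
    NULL
  }
  provider
}

# Usage with a stand-in object carrying the provider class:
p <- structure(list(), class = c("ProviderAnthropic", "Provider"))
supports_thinking(p) # TRUE
p <- toggle_thinking(p)
```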
2 changes: 2 additions & 0 deletions NEWS.md
@@ -1,5 +1,7 @@
 # side (development version)
 
+* Added thinking mode support for Anthropic, OpenAI, and Google Gemini. Toggle it with Ctrl+T in the chat interface. When enabled, the model's reasoning process is streamed into a collapsible block above each response. For Anthropic and Google Gemini, the thinking token budget is set to 1024 tokens when reasoning is on; for OpenAI, the reasoning effort parameter is set to "medium".
+
 # side 0.0.2
 
 Aligned Skills with Claude's storage format. Skills now use a directory-based
110 changes: 70 additions & 40 deletions R/chat-module.R
@@ -1,6 +1,16 @@
-chat_mod_server_interruptible <- function(id, client, interrupt_flag) {
+chat_mod_server_interruptible <- function(
+  id,
+  client,
+  interrupt_flag
+) {
   append_stream_task <- shiny::ExtendedTask$new(
-    function(client, ui_id, user_input, interrupt_flag) {
+    function(
+      client,
+      ui_id,
+      user_input,
+      interrupt_flag,
+      assistant_index = NULL
+    ) {
       stream <- client$stream_async(
         user_input,
         stream = "content"
@@ -13,18 +23,15 @@ chat_mod_server_interruptible <- function(id, client, interrupt_flag) {
           stream,
           interrupt_flag,
           client,
-          user_input
+          user_input,
+          assistant_index = assistant_index
         )
       })
     }
   )
 
   shiny::moduleServer(id, function(input, output, session) {
-    shinychat::chat_restore(
-      "chat",
-      client,
-      session = session
-    )
+    restore_chat_filtered(client, session)
 
     last_turn <- shiny::reactiveVal(NULL, label = "last_turn")
     last_input <- shiny::reactiveVal(NULL, label = "last_input")
@@ -35,12 +42,14 @@ chat_mod_server_interruptible <- function(id, client, interrupt_flag) {
       }
 
       last_input(input$chat_user_input)
+      assistant_index <- thinking_next_assistant_index(client)
 
       append_stream_task$invoke(
         client,
         "chat",
         input$chat_user_input,
-        interrupt_flag
+        interrupt_flag,
+        assistant_index
       )
     })
 
@@ -129,34 +138,7 @@ chat_mod_server_interruptible <- function(id, client, interrupt_flag) {
     # dynamic reloading. We expose this method to allow external code to reload
     # the chat UI while maintaining access to the module's session context.
     load_chat_ui <- function() {
-      shinychat::chat_clear("chat", session = session)
-
-      msgs <- shinychat::contents_shinychat(client)
-      lapply(msgs, function(msg_turn) {
-        is_list <- is.list(msg_turn$content) &&
-          !inherits(msg_turn$content, c("shiny.tag", "shiny.taglist"))
-
-        if (is_list) {
-          stream <- coro::generator(function() {
-            for (x in msg_turn$content) {
-              coro::yield(x)
-            }
-          })
-          shinychat::chat_append(
-            "chat",
-            stream(),
-            msg_turn$role,
-            session = session
-          )
-        } else {
-          shinychat::chat_append(
-            "chat",
-            msg_turn$content,
-            role = msg_turn$role,
-            session = session
-          )
-        }
-      })
+      restore_chat_filtered(client, session)
     }
 
     list(
@@ -179,7 +161,8 @@ chat_append_interruptible <- coro::async(function(
   user_input = NULL,
   role = "assistant",
   icon = NULL,
-  session = shiny::getDefaultReactiveDomain()
+  session = shiny::getDefaultReactiveDomain(),
+  assistant_index = NULL
 ) {
   chat_append_ <- function(content, chunk = TRUE, ...) {
     shinychat::chat_append_message(
@@ -196,6 +179,12 @@
 
   res <- fastmap::fastqueue(200)
 
+  if (is.null(assistant_index)) {
+    assistant_index <- thinking_next_assistant_index(client)
+  }
+  thinking_ctx <- thinking_context_new(session, assistant_index)
+  set_thinking_stream_callback(thinking_ctx)
+
   interrupted <- FALSE
   for (msg in stream) {
     if (promises::is.promising(msg)) {
@@ -210,21 +199,28 @@
       break
     }
 
-    res$add(msg)
-
     if (S7::S7_inherits(msg, ellmer::ContentToolResult)) {
       if (!is.null(msg@request)) {
         session$sendCustomMessage("shiny-tool-request-hide", msg@request@id)
       }
     }
 
+    if (S7::S7_inherits(msg, ellmer::ContentThinking)) {
+      next
+    }
+
+    res$add(msg)
+
     if (S7::S7_inherits(msg, ellmer::Content)) {
       msg <- shinychat::contents_shinychat(msg)
     }
 
     chat_append_(msg)
   }
 
+  clear_thinking_stream_callback()
+  thinking_context_finalize(thinking_ctx)
+
   if (interrupted) {
     streamed_content <- res$as_list()
 
@@ -282,3 +278,37 @@ as_ellmer_turns <- function(messages) {
     ellmer::Turn(role = role, contents = contents)
   })
 }
+
+restore_chat_filtered <- function(client, session) {
+  original_turns <- client$get_turns()
+
+  modified_turns <- list()
+
+  for (turn in original_turns) {
+    if (turn@role == "assistant") {
+      turn@contents <- Filter(
+        function(c) !S7::S7_inherits(c, ellmer::ContentThinking),
+        turn@contents
+      )
+    }
+    modified_turns <- c(modified_turns, list(turn))
+  }
+
+  client$set_turns(modified_turns)
+  on.exit(client$set_turns(original_turns), add = TRUE)
+
+  msgs <- shinychat::contents_shinychat(client)
+
+  shinychat::chat_clear("chat", session = session)
+  for (msg in msgs) {
+    if (is.null(msg$content) || length(msg$content) == 0) {
+      next
+    }
+    shinychat::chat_append(
+      "chat",
+      msg$content,
+      role = msg$role,
+      session = session
+    )
+  }
+}
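
In `chat_append_interruptible()`, `ellmer::ContentThinking` chunks are now skipped by the main append loop (and excluded from `res`, so interrupted-turn reconstruction stays free of reasoning content) and are handled through a thinking context instead. The helpers `thinking_context_new()`, `set_thinking_stream_callback()`, `clear_thinking_stream_callback()`, and `thinking_context_finalize()` are defined outside this diff; assuming an environment-based accumulator, their contract might look roughly like:

```r
# Hypothetical sketch of the thinking-context contract; the real helpers are
# defined outside this diff and may differ substantially.
thinking_context_new <- function(session, assistant_index) {
  ctx <- new.env(parent = emptyenv())
  ctx$session <- session
  ctx$assistant_index <- assistant_index
  ctx$chunks <- character()
  ctx
}

.thinking_callback <- new.env(parent = emptyenv())

set_thinking_stream_callback <- function(ctx) {
  # Invoked for each thinking chunk so it can stream into the collapsible
  # block above the assistant response identified by ctx$assistant_index.
  .thinking_callback$fn <- function(text) {
    ctx$chunks <- c(ctx$chunks, text)
  }
}

clear_thinking_stream_callback <- function() {
  .thinking_callback$fn <- NULL
}

thinking_context_finalize <- function(ctx) {
  # Would close out the collapsible block; here it just returns the text.
  invisible(paste(ctx$chunks, collapse = ""))
}
```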
18 changes: 11 additions & 7 deletions R/setup.R
@@ -39,17 +39,17 @@ prompt_provider_selection <- function() {
       }
     ),
     list(
-      name = "OpenAI (GPT 4.1)",
+      name = "OpenAI (GPT 5.2)",
       fn_name = "chat_openai",
-      model = "gpt-4.1",
-      create_client = function() ellmer::chat_openai(model = "gpt-4.1")
+      model = "gpt-5.2",
+      create_client = function() ellmer::chat_openai(model = "gpt-5.2")
     ),
     list(
-      name = "Google Gemini (Gemini 2.5 Pro)",
+      name = "Google Gemini (Gemini 3 Pro)",
       fn_name = "chat_google_gemini",
-      model = "gemini-2.5-pro",
+      model = "gemini-3-pro-preview",
       create_client = function() {
-        ellmer::chat_google_gemini(model = "gemini-2.5-pro")
+        ellmer::chat_google_gemini(model = "gemini-3-pro-preview")
       }
     ),
     list(
@@ -103,7 +103,11 @@ prompt_provider_selection <- function() {
 
   options(side.client = client)
 
-  prompt_persistence_selection(client, selected_info$fn_name, selected_info$model)
+  prompt_persistence_selection(
+    client,
+    selected_info$fn_name,
+    selected_info$model
+  )
 
   client
 }