From 1495a055bffe9a0cd5c8cc14950053f8eacaaac8 Mon Sep 17 00:00:00 2001
From: box-sdk-build
Date: Fri, 21 Feb 2025 03:13:56 -0800
Subject: [PATCH 1/3] docs: Documentation for Java SDK (box/box-codegen#664)
---
.codegen.json | 2 +-
README.md | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.codegen.json b/.codegen.json
index 1e36a3eb..835176ae 100644
--- a/.codegen.json
+++ b/.codegen.json
@@ -1 +1 @@
-{ "engineHash": "22f85cc", "specHash": "f20ba3f", "version": "1.12.0" }
+{ "engineHash": "5c674a3", "specHash": "f20ba3f", "version": "1.12.0" }
diff --git a/README.md b/README.md
index f3066265..cf4a1253 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
-# Box Python SDK GENERATED
+# Box Python SDK Gen
[](http://opensource.box.com/badges)

@@ -28,7 +28,7 @@ Embrace the new generation of Box SDKs and unlock the full potential of the Box
-- [Box Python SDK GENERATED](#box-python-sdk-generated)
+- [Box Python SDK Gen](#box-python-sdk-gen)
- [Table of contents](#table-of-contents)
- [Installing](#installing)
- [Getting Started](#getting-started)
From 0806a401dc19a5f50b2f7dfecc7fe0dd1447eb87 Mon Sep 17 00:00:00 2001
From: box-sdk-build
Date: Fri, 21 Feb 2025 05:23:44 -0800
Subject: [PATCH 2/3] docs: remove beta tag for AI extract endpoints
(box/box-openapi#511)
---
.codegen.json | 2 +-
box_sdk_gen/managers/ai.py | 30 +++++++++++++-----------------
box_sdk_gen/schemas/ai_ask.py | 24 ++++++++++--------------
docs/ai.md | 4 ++--
4 files changed, 26 insertions(+), 34 deletions(-)
diff --git a/.codegen.json b/.codegen.json
index 835176ae..42063025 100644
--- a/.codegen.json
+++ b/.codegen.json
@@ -1 +1 @@
-{ "engineHash": "5c674a3", "specHash": "f20ba3f", "version": "1.12.0" }
+{ "engineHash": "5c674a3", "specHash": "137a375", "version": "1.12.0" }
diff --git a/box_sdk_gen/managers/ai.py b/box_sdk_gen/managers/ai.py
index 554e346d..d14af870 100644
--- a/box_sdk_gen/managers/ai.py
+++ b/box_sdk_gen/managers/ai.py
@@ -226,23 +226,19 @@ def create_ai_ask(
extra_headers: Optional[Dict[str, Optional[str]]] = None
) -> Optional[AiResponseFull]:
"""
- Sends an AI request to supported LLMs and returns an answer specifically focused on the user's question given the provided context.
- :param mode: The mode specifies if this request is for a single or multiple items. If you select `single_item_qa` the `items` array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
- :type mode: CreateAiAskMode
- :param prompt: The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
- :type prompt: str
- :param items: The items to be processed by the LLM, often files.
-
- **Note**: Box AI handles documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first.
- If the file size exceeds 1MB, the first 1MB of text representation will be processed.
- If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
- :type items: List[AiItemAsk]
- :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
- :type dialogue_history: Optional[List[AiDialogueHistory]], optional
- :param include_citations: A flag to indicate whether citations should be returned., defaults to None
- :type include_citations: Optional[bool], optional
- :param extra_headers: Extra headers that will be included in the HTTP request., defaults to None
- :type extra_headers: Optional[Dict[str, Optional[str]]], optional
+ Sends an AI request to supported LLMs and returns an answer specifically focused on the user's question given the provided context.
+ :param mode: Box AI handles text documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first. If the text file size exceeds 1MB, the first 1MB of text representation will be processed. Box AI handles image documents with a resolution of 1024 x 1024 pixels, with a maximum of 5 images or 5 pages for multi-page images. If the number of images or image pages exceeds 5, the first 5 images or pages will be processed. If you set the `mode` parameter to `single_item_qa`, the `items` array can have one element only. Currently Box AI does not support multi-modal requests. If both images and text are sent, Box AI will only process the text.
+ :type mode: CreateAiAskMode
+ :param prompt: The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
+ :type prompt: str
+ :param items: The items to be processed by the LLM, often files.
+ :type items: List[AiItemAsk]
+ :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
+ :type dialogue_history: Optional[List[AiDialogueHistory]], optional
+ :param include_citations: A flag to indicate whether citations should be returned., defaults to None
+ :type include_citations: Optional[bool], optional
+ :param extra_headers: Extra headers that will be included in the HTTP request., defaults to None
+ :type extra_headers: Optional[Dict[str, Optional[str]]], optional
"""
if extra_headers is None:
extra_headers = {}
diff --git a/box_sdk_gen/schemas/ai_ask.py b/box_sdk_gen/schemas/ai_ask.py
index 46ba3635..321a7732 100644
--- a/box_sdk_gen/schemas/ai_ask.py
+++ b/box_sdk_gen/schemas/ai_ask.py
@@ -37,20 +37,16 @@ def __init__(
**kwargs
):
"""
- :param mode: The mode specifies if this request is for a single or multiple items. If you select `single_item_qa` the `items` array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
- :type mode: AiAskModeField
- :param prompt: The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
- :type prompt: str
- :param items: The items to be processed by the LLM, often files.
-
- **Note**: Box AI handles documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first.
- If the file size exceeds 1MB, the first 1MB of text representation will be processed.
- If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
- :type items: List[AiItemAsk]
- :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
- :type dialogue_history: Optional[List[AiDialogueHistory]], optional
- :param include_citations: A flag to indicate whether citations should be returned., defaults to None
- :type include_citations: Optional[bool], optional
+ :param mode: Box AI handles text documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first. If the text file size exceeds 1MB, the first 1MB of text representation will be processed. Box AI handles image documents with a resolution of 1024 x 1024 pixels, with a maximum of 5 images or 5 pages for multi-page images. If the number of images or image pages exceeds 5, the first 5 images or pages will be processed. If you set the `mode` parameter to `single_item_qa`, the `items` array can have one element only. Currently Box AI does not support multi-modal requests. If both images and text are sent, Box AI will only process the text.
+ :type mode: AiAskModeField
+ :param prompt: The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
+ :type prompt: str
+ :param items: The items to be processed by the LLM, often files.
+ :type items: List[AiItemAsk]
+ :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
+ :type dialogue_history: Optional[List[AiDialogueHistory]], optional
+ :param include_citations: A flag to indicate whether citations should be returned., defaults to None
+ :type include_citations: Optional[bool], optional
"""
super().__init__(**kwargs)
self.mode = mode
diff --git a/docs/ai.md b/docs/ai.md
index 5cbeb2d6..ab25257a 100644
--- a/docs/ai.md
+++ b/docs/ai.md
@@ -35,11 +35,11 @@ client.ai.create_ai_ask(
### Arguments
- mode `CreateAiAskMode`
- - The mode specifies if this request is for a single or multiple items. If you select `single_item_qa` the `items` array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
+ - Box AI handles text documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first. If the text file size exceeds 1MB, the first 1MB of text representation will be processed. Box AI handles image documents with a resolution of 1024 x 1024 pixels, with a maximum of 5 images or 5 pages for multi-page images. If the number of images or image pages exceeds 5, the first 5 images or pages will be processed. If you set the `mode` parameter to `single_item_qa`, the `items` array can have one element only. Currently Box AI does not support multi-modal requests. If both images and text are sent, Box AI will only process the text.
- prompt `str`
- The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
- items `List[AiItemAsk]`
- - The items to be processed by the LLM, often files. **Note**: Box AI handles documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first. If the file size exceeds 1MB, the first 1MB of text representation will be processed. If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
+ - The items to be processed by the LLM, often files.
- dialogue_history `Optional[List[AiDialogueHistory]]`
- The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response.
- include_citations `Optional[bool]`
From 5305463a91bed48b7ea4cdbbc491856e203eb9eb Mon Sep 17 00:00:00 2001
From: box-sdk-build
Date: Fri, 21 Feb 2025 05:25:33 -0800
Subject: [PATCH 3/3] chore: Update .codegen.json with commit hash of codegen
and openapi spec
---
.codegen.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.codegen.json b/.codegen.json
index 42063025..718862e4 100644
--- a/.codegen.json
+++ b/.codegen.json
@@ -1 +1 @@
-{ "engineHash": "5c674a3", "specHash": "137a375", "version": "1.12.0" }
+{ "engineHash": "5c674a3", "specHash": "06fc5f7", "version": "1.12.0" }