diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFile.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFile.g.cs
index bca46e84..9bbcbbab 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFile.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFile.g.cs
@@ -5,7 +5,9 @@ namespace tryAGI.OpenAI
public partial interface IVectorStoresClient
{
///
- /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// This endpoint is subject to a per-vector-store write rate limit of 300 requests per minute, shared with `/vector_stores/{vector_store_id}/file_batches`.
+ /// For uploading multiple files to the same vector store, use the file batches endpoint to reduce request volume.
///
///
/// Example: vs_abc123
@@ -19,13 +21,15 @@ public partial interface IVectorStoresClient
global::tryAGI.OpenAI.CreateVectorStoreFileRequest request,
global::System.Threading.CancellationToken cancellationToken = default);
///
- /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// This endpoint is subject to a per-vector-store write rate limit of 300 requests per minute, shared with `/vector_stores/{vector_store_id}/file_batches`.
+ /// For uploading multiple files to the same vector store, use the file batches endpoint to reduce request volume.
///
///
/// Example: vs_abc123
///
///
- /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.
+ /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. For multi-file ingestion, we recommend [`file_batches`](/docs/api-reference/vector-stores-file-batches/createBatch) to minimize per-vector-store write requests.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFileBatch.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFileBatch.g.cs
index a8405c64..117fcb43 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFileBatch.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IVectorStoresClient.CreateVectorStoreFileBatch.g.cs
@@ -6,7 +6,9 @@ public partial interface IVectorStoresClient
{
///
/// Create a vector store file batch.
- /// The maximum number of files in a single batch request is 2000.
+ /// The maximum number of files in a single batch request is 2000.
+ /// Vector store file attach requests are rate-limited per vector store (300 requests per minute across both this endpoint and `/vector_stores/{vector_store_id}/files`).
+ /// For ingesting multiple files into the same vector store, this batch endpoint is recommended.
///
///
/// Example: vs_abc123
@@ -21,16 +23,18 @@ public partial interface IVectorStoresClient
global::System.Threading.CancellationToken cancellationToken = default);
///
/// Create a vector store file batch.
- /// The maximum number of files in a single batch request is 2000.
+ /// The maximum number of files in a single batch request is 2000.
+ /// Vector store file attach requests are rate-limited per vector store (300 requests per minute across both this endpoint and `/vector_stores/{vector_store_id}/files`).
+ /// For ingesting multiple files into the same vector store, this batch endpoint is recommended.
///
///
/// Example: vs_abc123
///
///
- /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. Mutually exclusive with `files`.
+ /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `files`.
///
///
- /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. Mutually exclusive with `file_ids`.
+ /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `file_ids`.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileBatchRequest.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileBatchRequest.g.cs
index 1f0d1628..a2ded007 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileBatchRequest.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileBatchRequest.g.cs
@@ -9,13 +9,13 @@ namespace tryAGI.OpenAI
public sealed partial class CreateVectorStoreFileBatchRequest
{
///
- /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. Mutually exclusive with `files`.
+ /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `files`.
///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")]
public global::System.Collections.Generic.IList? FileIds { get; set; }
///
- /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. Mutually exclusive with `file_ids`.
+ /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `file_ids`.
///
[global::System.Text.Json.Serialization.JsonPropertyName("files")]
public global::System.Collections.Generic.IList? Files { get; set; }
@@ -43,10 +43,10 @@ public sealed partial class CreateVectorStoreFileBatchRequest
/// Initializes a new instance of the class.
///
///
- /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. Mutually exclusive with `files`.
+ /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `files`.
///
///
- /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. Mutually exclusive with `file_ids`.
+ /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `file_ids`.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileRequest.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileRequest.g.cs
index 853ed6ba..4d62ff9e 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileRequest.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateVectorStoreFileRequest.g.cs
@@ -9,7 +9,7 @@ namespace tryAGI.OpenAI
public sealed partial class CreateVectorStoreFileRequest
{
///
- /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.
+ /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. For multi-file ingestion, we recommend [`file_batches`](/docs/api-reference/vector-stores-file-batches/createBatch) to minimize per-vector-store write requests.
///
[global::System.Text.Json.Serialization.JsonPropertyName("file_id")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -38,7 +38,7 @@ public sealed partial class CreateVectorStoreFileRequest
/// Initializes a new instance of the class.
///
///
- /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.
+ /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. For multi-file ingestion, we recommend [`file_batches`](/docs/api-reference/vector-stores-file-batches/createBatch) to minimize per-vector-store write requests.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFile.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFile.g.cs
index 8e2f95c0..3c9faa1b 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFile.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFile.g.cs
@@ -24,7 +24,9 @@ partial void ProcessCreateVectorStoreFileResponseContent(
ref string content);
///
- /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// This endpoint is subject to a per-vector-store write rate limit of 300 requests per minute, shared with `/vector_stores/{vector_store_id}/file_batches`.
+ /// For uploading multiple files to the same vector store, use the file batches endpoint to reduce request volume.
///
///
/// Example: vs_abc123
@@ -188,13 +190,15 @@ partial void ProcessCreateVectorStoreFileResponseContent(
}
}
///
- /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).
+ /// This endpoint is subject to a per-vector-store write rate limit of 300 requests per minute, shared with `/vector_stores/{vector_store_id}/file_batches`.
+ /// For uploading multiple files to the same vector store, use the file batches endpoint to reduce request volume.
///
///
/// Example: vs_abc123
///
///
- /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.
+ /// A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. For multi-file ingestion, we recommend [`file_batches`](/docs/api-reference/vector-stores-file-batches/createBatch) to minimize per-vector-store write requests.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFileBatch.g.cs b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFileBatch.g.cs
index f8e598d1..26830ed5 100644
--- a/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFileBatch.g.cs
+++ b/src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.VectorStoresClient.CreateVectorStoreFileBatch.g.cs
@@ -25,7 +25,9 @@ partial void ProcessCreateVectorStoreFileBatchResponseContent(
///
/// Create a vector store file batch.
- /// The maximum number of files in a single batch request is 2000.
+ /// The maximum number of files in a single batch request is 2000.
+ /// Vector store file attach requests are rate-limited per vector store (300 requests per minute across both this endpoint and `/vector_stores/{vector_store_id}/files`).
+ /// For ingesting multiple files into the same vector store, this batch endpoint is recommended.
///
///
/// Example: vs_abc123
@@ -190,16 +192,18 @@ partial void ProcessCreateVectorStoreFileBatchResponseContent(
}
///
/// Create a vector store file batch.
- /// The maximum number of files in a single batch request is 2000.
+ /// The maximum number of files in a single batch request is 2000.
+ /// Vector store file attach requests are rate-limited per vector store (300 requests per minute across both this endpoint and `/vector_stores/{vector_store_id}/files`).
+ /// For ingesting multiple files into the same vector store, this batch endpoint is recommended.
///
///
/// Example: vs_abc123
///
///
- /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. Mutually exclusive with `files`.
+ /// A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `files`.
///
///
- /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. Mutually exclusive with `file_ids`.
+ /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. The maximum batch size is 2000 files. This endpoint is recommended for multi-file ingestion and helps reduce per-vector-store write request pressure. Mutually exclusive with `file_ids`.
///
///
/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
diff --git a/src/libs/tryAGI.OpenAI/openapi.yaml b/src/libs/tryAGI.OpenAI/openapi.yaml
index 0a908a86..6f02beb1 100644
--- a/src/libs/tryAGI.OpenAI/openapi.yaml
+++ b/src/libs/tryAGI.OpenAI/openapi.yaml
@@ -1201,7 +1201,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -1323,7 +1323,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -1453,7 +1453,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -1729,7 +1729,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -1911,7 +1911,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -2061,7 +2061,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranscriptionCreateParams params = TranscriptionCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.GPT_4O_TRANSCRIBE)
.build();
TranscriptionCreateResponse transcription = client.audio().transcriptions().create(params);
@@ -2227,7 +2227,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
TranslationCreateParams params = TranslationCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.model(AudioModel.WHISPER_1)
.build();
TranslationCreateResponse translation = client.audio().translations().create(params);
@@ -9569,7 +9569,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
FileCreateParams params = FileCreateParams.builder()
- .file(ByteArrayInputStream("Example data".getBytes()))
+ .file(new ByteArrayInputStream("Example data".getBytes()))
.purpose(FilePurpose.ASSISTANTS)
.build();
FileObject fileObject = client.files().create(params);
@@ -12694,7 +12694,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
ImageEditParams params = ImageEditParams.builder()
- .image(ByteArrayInputStream("Example data".getBytes()))
+ .image(new ByteArrayInputStream("Example data".getBytes()))
.prompt("A cute baby sea otter wearing a beret")
.build();
ImagesResponse imagesResponse = client.images().edit(params);
@@ -12799,7 +12799,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
ImageEditParams params = ImageEditParams.builder()
- .image(ByteArrayInputStream("Example data".getBytes()))
+ .image(new ByteArrayInputStream("Example data".getBytes()))
.prompt("A cute baby sea otter wearing a beret")
.build();
ImagesResponse imagesResponse = client.images().edit(params);
@@ -13162,7 +13162,7 @@ paths:
OpenAIClient client = OpenAIOkHttpClient.fromEnv();
ImageCreateVariationParams params = ImageCreateVariationParams.builder()
- .image(ByteArrayInputStream("Example data".getBytes()))
+ .image(new ByteArrayInputStream("Example data".getBytes()))
.build();
ImagesResponse imagesResponse = client.images().createVariation(params);
}
@@ -26773,7 +26773,7 @@ paths:
PartCreateParams params = PartCreateParams.builder()
.uploadId("upload_abc123")
- .data(ByteArrayInputStream("Example data".getBytes()))
+ .data(new ByteArrayInputStream("Example data".getBytes()))
.build();
UploadPart uploadPart = client.uploads().parts().create(params);
}
@@ -27388,7 +27388,15 @@ paths:
tags:
- Vector stores
summary: Create a vector store file batch.
- description: The maximum number of files in a single batch request is 2000.
+ description: >
+ The maximum number of files in a single batch request is 2000.
+
+ Vector store file attach requests are rate-limited per vector store (300
+ requests per minute across both this endpoint and
+ `/vector_stores/{vector_store_id}/files`).
+
+ For ingesting multiple files into the same vector store, this batch
+ endpoint is recommended.
parameters:
- in: path
name: vector_store_id
@@ -27414,6 +27422,11 @@ paths:
x-oaiMeta:
name: Create vector store file batch
group: vector_stores
+ description: >
+ Attaches multiple files to a vector store in one request. This is the
+ recommended approach for multi-file ingestion, especially because
+ per-vector-store file attach writes are rate-limited (300
+ requests/minute shared with `/vector_stores/{vector_store_id}/files`).
examples:
request:
curl: >
@@ -28204,6 +28217,13 @@ paths:
Create a vector store file by attaching a
[File](/docs/api-reference/files) to a [vector
store](/docs/api-reference/vector-stores/object).
+ description: >-
+ This endpoint is subject to a per-vector-store write rate limit of 300
+ requests per minute, shared with
+ `/vector_stores/{vector_store_id}/file_batches`.
+
+ For uploading multiple files to the same vector store, use the file
+ batches endpoint to reduce request volume.
parameters:
- in: path
name: vector_store_id
@@ -28229,6 +28249,11 @@ paths:
x-oaiMeta:
name: Create vector store file
group: vector_stores
+ description: >
+ Attaches one file to a vector store. File attach writes are
+ rate-limited per vector store (300 requests/minute shared with
+ `/vector_stores/{vector_store_id}/file_batches`), so use file batches
+ when uploading multiple files.
examples:
request:
curl: |
@@ -29740,7 +29765,7 @@ paths:
VideoCreateCharacterParams params = VideoCreateCharacterParams.builder()
.name("x")
- .video(ByteArrayInputStream("Example data".getBytes()))
+ .video(new ByteArrayInputStream("Example data".getBytes()))
.build();
VideoCreateCharacterResponse response = client.videos().createCharacter(params);
}
@@ -29902,7 +29927,7 @@ paths:
VideoEditParams params = VideoEditParams.builder()
.prompt("x")
- .video(ByteArrayInputStream("Example data".getBytes()))
+ .video(new ByteArrayInputStream("Example data".getBytes()))
.build();
Video video = client.videos().edit(params);
}
@@ -29993,7 +30018,7 @@ paths:
VideoExtendParams params = VideoExtendParams.builder()
.prompt("x")
.seconds(VideoSeconds._4)
- .video(ByteArrayInputStream("Example data".getBytes()))
+ .video(new ByteArrayInputStream("Example data".getBytes()))
.build();
Video video = client.videos().extend(params);
}
@@ -41173,7 +41198,9 @@ components:
store should use. Useful for tools like `file_search` that can
access files. If `attributes` or `chunking_strategy` are provided,
they will be applied to all files in the batch. The maximum batch
- size is 2000 files. Mutually exclusive with `files`.
+ size is 2000 files. This endpoint is recommended for multi-file
+ ingestion and helps reduce per-vector-store write request pressure.
+ Mutually exclusive with `files`.
type: array
minItems: 1
maxItems: 2000
@@ -41185,7 +41212,9 @@ components:
`attributes` or `chunking_strategy`. Use this when you need to
override metadata for specific files. The global `attributes` or
`chunking_strategy` will be ignored and must be specified for each
- file. The maximum batch size is 2000 files. Mutually exclusive with
+ file. The maximum batch size is 2000 files. This endpoint is
+ recommended for multi-file ingestion and helps reduce
+ per-vector-store write request pressure. Mutually exclusive with
`file_ids`.
type: array
minItems: 1
@@ -41208,7 +41237,10 @@ components:
file_id:
description: >-
A [File](/docs/api-reference/files) ID that the vector store should
- use. Useful for tools like `file_search` that can access files.
+ use. Useful for tools like `file_search` that can access files. For
+ multi-file ingestion, we recommend
+ [`file_batches`](/docs/api-reference/vector-stores-file-batches/createBatch)
+ to minimize per-vector-store write requests.
type: string
chunking_strategy:
$ref: '#/components/schemas/ChunkingStrategyRequestParam'