22 changes: 22 additions & 0 deletions src/gradient/types/responses/__init__.py
@@ -0,0 +1,22 @@
# Types for the Responses API. See docs/RESPONSES_API_PR_BREAKDOWN.md.

from __future__ import annotations

from .response_create_params import (
ResponseTool as ResponseTool,
ResponseInputItem as ResponseInputItem,
ResponseToolChoice as ResponseToolChoice,
ResponseCreateParams as ResponseCreateParams,
ResponseToolFunction as ResponseToolFunction,
ResponseToolChoiceNamed as ResponseToolChoiceNamed,
ResponseInputUserMessage as ResponseInputUserMessage,
ResponseInputFunctionCall as ResponseInputFunctionCall,
ResponseToolChoiceFunction as ResponseToolChoiceFunction,
ResponseInputFunctionCallOutput as ResponseInputFunctionCallOutput,
)
from .response_create_response import (
ResponseOutputItem as ResponseOutputItem,
ResponseOutputMessage as ResponseOutputMessage,
ResponseCreateResponse as ResponseCreateResponse,
ResponseOutputFunctionCall as ResponseOutputFunctionCall,
)
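
The block above simply re-exports every Responses type at the package level, so downstream code can import from `gradient.types.responses` without reaching into the submodules. A minimal import sketch under that assumption, using only names exported above:

from gradient.types.responses import (
    ResponseTool,
    ResponseCreateParams,
    ResponseCreateResponse,
)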
105 changes: 105 additions & 0 deletions src/gradient/types/responses/response_create_params.py
@@ -0,0 +1,105 @@
# Types for the Responses API (POST /v1/responses). See docs/RESPONSES_API_PR_BREAKDOWN.md.

from __future__ import annotations

from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict

__all__ = [
"ResponseCreateParams",
"ResponseInputItem",
"ResponseInputUserMessage",
"ResponseInputFunctionCall",
"ResponseInputFunctionCallOutput",
"ResponseToolChoice",
"ResponseToolChoiceFunction",
"ResponseTool",
"ResponseToolFunction",
]


class ResponseInputUserMessage(TypedDict, total=False):
"""User message in the request input list."""

type: Required[Literal["message"]]
role: Required[Literal["user"]]
content: Required[str]


class ResponseInputFunctionCall(TypedDict, total=False):
"""Function call (assistant turn) in the request input list."""

type: Required[Literal["function_call"]]
id: Required[str]
name: Required[str]
arguments: Required[str]


class ResponseInputFunctionCallOutput(TypedDict, total=False):
"""Function call result (tool output) in the request input list."""

type: Required[Literal["function_call_output"]]
call_id: Required[str]
output: Required[str]


ResponseInputItem: TypeAlias = Union[
ResponseInputUserMessage,
ResponseInputFunctionCall,
ResponseInputFunctionCallOutput,
]


class ResponseToolFunction(TypedDict, total=False):
"""Function definition for a tool."""

name: Required[str]
description: str
parameters: Dict[str, object]


class ResponseTool(TypedDict, total=False):
"""Tool the model may call (e.g. a function)."""

type: Required[Literal["function"]]
function: Required[ResponseToolFunction]


class ResponseToolChoiceFunction(TypedDict, total=False):
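    """Function reference (by name) used in a named tool_choice."""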
name: Required[str]


class ResponseToolChoiceNamed(TypedDict, total=False):
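    """tool_choice form that pins the model to one named function."""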
type: Required[Literal["function"]]
function: Required[ResponseToolChoiceFunction]


ResponseToolChoice: TypeAlias = Union[
Literal["none", "auto", "required"],
ResponseToolChoiceNamed,
]


class ResponseCreateParams(TypedDict, total=False):
"""Request body for POST /v1/responses."""

model: Required[str]
"""Model ID (e.g. openai-gpt-5.2-pro)."""

input: Required[Iterable[ResponseInputItem]]
"""List of input items: user messages, function_call, function_call_output."""

tools: Iterable[ResponseTool]
"""Optional list of tools the model may call."""

max_output_tokens: Optional[int]
"""Maximum tokens to generate."""

instructions: Optional[str]
"""System or developer instructions."""

temperature: Optional[float]
"""Sampling temperature."""

tool_choice: ResponseToolChoice
"""Which tool (if any) the model must or may call."""
83 changes: 83 additions & 0 deletions src/gradient/types/responses/response_create_response.py
@@ -0,0 +1,83 @@
# Response type for the Responses API (POST /v1/responses). See docs/RESPONSES_API_PR_BREAKDOWN.md.

from __future__ import annotations

from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias

from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..shared.completion_usage import CompletionUsage

__all__ = [
"ResponseCreateResponse",
"ResponseOutputItem",
"ResponseOutputMessage",
"ResponseOutputFunctionCall",
]


class ResponseOutputMessage(BaseModel):
"""Message item in the response output list."""

type: Literal["message"] = "message"
role: Literal["assistant"] = "assistant"
content: Optional[str] = None
"""Text content of the message."""
output_text: Optional[str] = None
"""Aggregated or final text for this item (when present)."""


class ResponseOutputFunctionCall(BaseModel):
"""Function call item in the response output list."""

type: Literal["function_call"] = "function_call"
id: str
name: str
arguments: str


# Discriminated union so Pydantic parses each output item by "type".
ResponseOutputItem: TypeAlias = Annotated[
Union[ResponseOutputMessage, ResponseOutputFunctionCall],
PropertyInfo(discriminator="type"),
]


class ResponseCreateResponse(BaseModel):
"""
Response from POST /v1/responses.
Use the `output_text` property to get aggregated text from message items in `output`.
"""

id: str
"""Unique identifier for the response."""

output: List[ResponseOutputItem]
"""List of output items (messages, function calls)."""

status: str
"""Status of the response (e.g. completed, failed)."""

error: Optional[str] = None
"""Error message if status indicates failure."""

model: Optional[str] = None
"""Model used for the response."""

usage: Optional[CompletionUsage] = None
"""Token usage statistics."""

@property
def output_text(self) -> str:
"""
Aggregate text from all message items in `output`.
For each item with type "message", uses `output_text` if present, else `content`.
"""
parts: List[str] = []
for item in self.output:
if isinstance(item, ResponseOutputMessage):
text: Optional[str] = item.output_text if item.output_text is not None else item.content
if text:
parts.append(text)
return "".join(parts)
Empty file added tests/types/__init__.py
Empty file.
Empty file.
130 changes: 130 additions & 0 deletions tests/types/responses/test_response_create_response.py
@@ -0,0 +1,130 @@
# Tests for Responses API response types. No network; static payloads only.

from __future__ import annotations

from typing import Any

from gradient._compat import parse_obj
from gradient.types.responses import (
ResponseOutputMessage,
ResponseCreateResponse,
ResponseOutputFunctionCall,
)

# Minimal valid response payload (static, no network).
MINIMAL_RESPONSE: dict[str, Any] = {
"id": "resp_123",
"output": [],
"status": "completed",
"model": "openai-gpt-5.2-pro",
}


class TestResponseCreateResponseParse:
"""Test that ResponseCreateResponse parses minimal and extended JSON."""

def test_parse_minimal_response(self) -> None:
parsed = parse_obj(ResponseCreateResponse, MINIMAL_RESPONSE)
assert parsed.id == "resp_123"
assert parsed.output == []
assert parsed.status == "completed"
assert parsed.model == "openai-gpt-5.2-pro"
assert parsed.output_text == ""

def test_parse_response_with_usage(self) -> None:
payload: dict[str, Any] = {
**MINIMAL_RESPONSE,
"usage": {
"prompt_tokens": 10,
"completion_tokens": 5,
"total_tokens": 15,
},
}
parsed = parse_obj(ResponseCreateResponse, payload)
assert parsed.usage is not None
assert parsed.usage.prompt_tokens == 10
assert parsed.usage.completion_tokens == 5
assert parsed.usage.total_tokens == 15


class TestResponseCreateResponseOutputText:
"""Test that output_text aggregates text from message items in output."""

def test_output_text_aggregates_content(self) -> None:
payload: dict[str, Any] = {
**MINIMAL_RESPONSE,
"output": [
{"type": "message", "role": "assistant", "content": "Hello "},
{"type": "message", "role": "assistant", "content": "world."},
],
}
parsed = parse_obj(ResponseCreateResponse, payload)
assert parsed.output_text == "Hello world."

def test_output_text_prefers_output_text_field(self) -> None:
payload: dict[str, Any] = {
**MINIMAL_RESPONSE,
"output": [
{
"type": "message",
"role": "assistant",
"content": "raw",
"output_text": "aggregated",
},
],
}
parsed = parse_obj(ResponseCreateResponse, payload)
assert parsed.output_text == "aggregated"

def test_output_text_skips_function_call_items(self) -> None:
payload: dict[str, Any] = {
**MINIMAL_RESPONSE,
"output": [
{"type": "message", "role": "assistant", "content": "Here is "},
{
"type": "function_call",
"id": "call_1",
"name": "get_weather",
"arguments": "{}",
},
{"type": "message", "role": "assistant", "content": "the result."},
],
}
parsed = parse_obj(ResponseCreateResponse, payload)
assert parsed.output_text == "Here is the result."

def test_output_text_empty_message_content_treated_as_empty(self) -> None:
payload: dict[str, Any] = {
**MINIMAL_RESPONSE,
"output": [
{"type": "message", "role": "assistant", "content": None},
{"type": "message", "role": "assistant", "output_text": "only this"},
],
}
parsed = parse_obj(ResponseCreateResponse, payload)
assert parsed.output_text == "only this"


class TestResponseOutputItemTypes:
"""Test that output item types parse correctly."""

def test_message_item_parses(self) -> None:
msg = parse_obj(ResponseOutputMessage, {"type": "message", "role": "assistant", "content": "Hi"})
assert msg.type == "message"
assert msg.role == "assistant"
assert msg.content == "Hi"

def test_function_call_item_parses(self) -> None:
fc = parse_obj(
ResponseOutputFunctionCall,
{
"type": "function_call",
"id": "call_1",
"name": "foo",
"arguments": '{"x": 1}',
},
)
assert fc.type == "function_call"
assert fc.id == "call_1"
assert fc.name == "foo"
assert fc.arguments == '{"x": 1}'
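
These tests use only static payloads and plain asserts, so they need no network access or fixtures. Assuming the repository's usual pytest workflow (the exact invocation is not specified in this diff), they can be run with something like:

pytest tests/types/responses/test_response_create_response.py -q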