diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 36a981404..102179e85 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -299,7 +299,12 @@ async def _fetch_response(
             if response_format is not omit:
                 response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
             else:
-                response_format = {"verbosity": model_settings.verbosity}
+                # When no output_schema is present, `text` needs an explicit format object.
+                # Use the plain text format object with verbosity to produce a valid `text` payload.
+                response_format = {
+                    "format": {"type": "text"},
+                    "verbosity": model_settings.verbosity,
+                }
 
         stream_param: Literal[True] | Omit = True if stream else omit
 
@@ -501,7 +506,7 @@ def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None
                 }
                 includes = None
             else:
-                raise UserError(f"Unknown tool type: {type(tool)}, tool")
+                raise UserError(f"Unknown tool type: {type(tool)} for tool {tool!r}")
 
         return converted_tool, includes
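
For context, a minimal sketch of the payload shape the changed branch now produces when no output_schema is present but a verbosity setting is given. This is not the library code: `omit` here is a stand-in for the sentinel used in openai_responses.py, and `build_text_param` is a hypothetical helper for illustration only.

```python
# Minimal sketch mirroring the changed branch in _fetch_response.
# Assumptions: `omit` stands in for the real sentinel; build_text_param is
# not part of the library API.
omit = object()

def build_text_param(response_format, verbosity):
    if response_format is not omit:
        # A structured output format already exists; just attach verbosity.
        response_format["verbosity"] = verbosity
    else:
        # No output_schema: `text` needs an explicit format object, so pair
        # the plain text format with the requested verbosity.
        response_format = {
            "format": {"type": "text"},
            "verbosity": verbosity,
        }
    return response_format

print(build_text_param(omit, "low"))
# -> {'format': {'type': 'text'}, 'verbosity': 'low'}
```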