Tool Use by A-F-V · Pull Request #2 · A-F-V/instructor · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

Tool Use #2

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Mar 1, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8000
12 changes: 5 additions & 7 deletions instructor/client_anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,13 +59,11 @@ def from_anthropic(
TypeError: If enable_prompt_caching is True and client is not Anthropic or AsyncAnthropic
AssertionError: If mode is not ANTHROPIC_JSON or ANTHROPIC_TOOLS
"""
assert (
mode
in {
instructor.Mode.ANTHROPIC_JSON,
instructor.Mode.ANTHROPIC_TOOLS,
}
), "Mode be one of {instructor.Mode.ANTHROPIC_JSON, instructor.Mode.ANTHROPIC_TOOLS}"
assert mode in {
instructor.Mode.ANTHROPIC_JSON,
instructor.Mode.ANTHROPIC_TOOLS,
instructor.Mode.ANTHROPIC_REASONING_TOOLS,
}, "Mode be one of {instructor.Mode.ANTHROPIC_JSON, instructor.Mode.ANTHROPIC_TOOLS, instructor.Mode.ANTHROPIC_REASONING_TOOLS}"

assert isinstance(
client,
Expand Down
2 changes: 1 addition & 1 deletion instructor/function_calls.py
10000
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ def from_response(
Returns:
cls (OpenAISchema): An instance of the class
"""
if mode == Mode.ANTHROPIC_TOOLS:
if mode == Mode.ANTHROPIC_TOOLS or mode == Mode.ANTHROPIC_REASONING_TOOLS:
return cls.parse_anthropic_tools(completion, validation_context, strict)

if mode == Mode.ANTHROPIC_JSON:
Expand Down
1 change: 1 addition & 0 deletions instructor/mode.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ class Mode(enum.Enum):
MD_JSON = "markdown_json_mode"
JSON_SCHEMA = "json_schema_mode"
ANTHROPIC_TOOLS = "anthropic_tools"
ANTHROPIC_REASONING_TOOLS = "anthropic_reasoning_tools"
ANTHROPIC_JSON = "anthropic_json"
COHERE_TOOLS = "cohere_tools"
VERTEXAI_TOOLS = "vertexai_tools"
Expand Down
14 changes: 11 additions & 3 deletions instructor/multimodal.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,9 @@ def to_openai(self) -> dict[str, Any]:
class Audio(BaseModel):
"""Represents an audio that can be loaded from a URL or file path."""

source: str | Path = Field(description="URL or file path of the audio") # noqa: UP007
source: str | Path = Field(
description="URL or file path of the audio"
) # noqa: UP007
data: Union[str, None] = Field( # noqa: UP007
None, description="Base64 encoded audio data", repr=False
)
Expand Down Expand Up @@ -293,7 +295,11 @@ def convert_contents(
elif isinstance(content, dict):
converted_contents.append(content)
elif isinstance(content, (Image, Audio)):
if mode in {Mode.ANTHROPIC_JSON, Mode.ANTHROPIC_TOOLS}:
if mode in {
Mode.ANTHROPIC_JSON,
Mode.ANTHROPIC_TOOLS,
Mode.ANTHROPIC_REASONING_TOOLS,
}:
converted_contents.append(content.to_anthropic())
elif mode in {Mode.GEMINI_JSON, Mode.GEMINI_TOOLS}:
raise NotImplementedError("Gemini is not supported yet")
Expand Down Expand Up @@ -339,7 +345,9 @@ def is_image_params(x: Any) -> bool:
}
if autodetect_images:
if isinstance(content, list):
new_content: list[str | dict[str, Any] | Image | Audio] = [] # noqa: UP007
new_content: list[str | dict[str, Any] | Image | Audio] = (
[]
) # noqa: UP007
for item in content:
if isinstance(item, str):
new_content.append(Image.autodetect_safely(item))
Expand Down
43 changes: 34 additions & 9 deletions instructor/process_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,12 @@
from instructor.mode import Mode
from instructor.dsl.iterable import IterableBase, IterableModel
from instructor.dsl.parallel import (
ParallelBase,
ParallelModel,
handle_parallel_model,
ParallelBase,
ParallelModel,
handle_parallel_model,
get_types_array,
VertexAIParallelBase,
VertexAIParallelModel
VertexAIParallelModel,
)
from instructor.dsl.partial import PartialBase
from instructor.dsl.simple_type import AdapterBase, ModelAdapter, is_simple_type
Expand Down Expand Up @@ -357,6 +357,30 @@ def handle_anthropic_tools(
return response_model, new_kwargs


def handle_anthropic_reasoning_tools(
    response_model: type[T], new_kwargs: dict[str, Any]
) -> tuple[type[T], dict[str, Any]]:
    """Prepare kwargs for Anthropic tool use when extended thinking (reasoning) is on.

    Delegates to ``handle_anthropic_tools`` for the schema/tool setup, then
    relaxes ``tool_choice`` because Anthropic does not allow forced tool use
    while reasoning is enabled:
    https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview#forcing-tool-use

    Args:
        response_model: The model type the response should be parsed into.
        new_kwargs: Keyword arguments being prepared for the Anthropic API call.

    Returns:
        The (possibly wrapped) response model and the updated kwargs.
    """
    response_model, new_kwargs = handle_anthropic_tools(response_model, new_kwargs)

    # Reasoning forbids forced tool use, so fall back to "auto" and steer the
    # model toward a tool-only answer via the system prompt instead.
    new_kwargs["tool_choice"] = {"type": "auto"}

    implicit_forced_tool_message = (
        "Return only the tool call and no additional text."
    )
    new_kwargs["system"] = combine_system_messages(
        new_kwargs.get("system"),
        [{"type": "text", "text": implicit_forced_tool_message}],
    )
    return response_model, new_kwargs


def handle_anthropic_json(
response_model: type[T], new_kwargs: dict[str, Any]
) -> tuple[type[T], dict[str, Any]]:
Expand Down Expand Up @@ -498,17 +522,17 @@ def handle_vertexai_parallel_tools(
assert (
new_kwargs.get("stream", False) is False
), "stream=True is not supported when using PARALLEL_TOOLS mode"

from instructor.client_vertexai import vertexai_process_response

# Extract concrete types before passing to vertexai_process_response
model_types = list(get_types_array(response_model))
contents, tools, tool_config = vertexai_process_response(new_kwargs, model_types)

new_kwargs["contents"] = contents
new_kwargs["tools"] = tools
new_kwargs["tool_config"] = tool_config

return VertexAIParallelModel(typehint=response_model), new_kwargs


Expand Down Expand Up @@ -612,7 +636,7 @@ def handle_cohere_tools(


def handle_writer_tools(
response_model: type[T], new_kwargs: dict[str, Any]
response_model: type[T], new_kwargs: dict[str, Any]
) -> tuple[type[T], dict[str, Any]]:
new_kwargs["tools"] = [
{
Expand Down Expand Up @@ -732,6 +756,7 @@ def handle_response_model(
Mode.MD_JSON: lambda rm, nk: handle_json_modes(rm, nk, Mode.MD_JSON), # type: ignore
Mode.JSON_SCHEMA: lambda rm, nk: handle_json_modes(rm, nk, Mode.JSON_SCHEMA), # type: ignore
Mode.ANTHROPIC_TOOLS: handle_anthropic_tools,
Mode.ANTHROPIC_REASONING_TOOLS: handle_anthropic_reasoning_tools,
Mode.ANTHROPIC_JSON: handle_anthropic_json,
Mode.COHERE_JSON_SCHEMA: handle_cohere_json_schema,
Mode.COHERE_TOOLS: handle_cohere_tools,
Expand Down
1 change: 1 addition & 0 deletions instructor/reask.py
Original file line number Diff line number Diff line change
Expand Up @@ -336,6 +336,7 @@ def handle_reask_kwargs(

functions = {
Mode.ANTHROPIC_TOOLS: reask_anthropic_tools,
Mode.ANTHROPIC_REASONING_TOOLS: reask_anthropic_tools,
Mode.ANTHROPIC_JSON: reask_anthropic_json,
Mode.COHERE_TOOLS: reask_cohere_tools,
Mode.COHERE_JSON_SCHEMA: reask_cohere_tools, # Same Function
Expand Down
14 changes: 9 additions & 5 deletions tests/llm/test_anthropic/test_reasoning.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,23 @@
import instructor
from pydantic import BaseModel
from itertools import product
from .util import models, modes
from anthropic.types.message import Message


class Answer(BaseModel):
    """Structured response model for the reasoning test: a single numeric answer."""

    # Numeric result extracted from the model's reply.
    answer: float


def test_reasoning():
# Modes exercised against Anthropic's extended-thinking (reasoning) models;
# the test below is parametrized over each of these.
modes = [
    instructor.Mode.ANTHROPIC_REASONING_TOOLS,
    instructor.Mode.ANTHROPIC_JSON,
]


@pytest.mark.parametrize("mode", modes)
def test_reasoning(mode):
anthropic_client = anthropic.Anthropic()
client = instructor.from_anthropic(
anthropic_client, mode=instructor.Mode.ANTHROPIC_JSON
)
client = instructor.from_anthropic(anthropic_client, mode=mode)
response = client.chat.completions.create(
model="claude-3-7-sonnet-latest",
response_model=Answer,
Expand Down
0