feat: add Anthropic effort level support for Claude models

Add effort parameter (high/medium/low) for Claude 4.5+ and 4.6 models,
consistent with OpenAI reasoning_effort and Google thinking_level.
Also add content normalization for Anthropic responses.
This commit is contained in:
Yijia-Xiao
2026-03-22 21:57:05 +00:00
parent 77755f0431
commit bd9b1e5efa
5 changed files with 57 additions and 3 deletions

View File

@@ -556,6 +556,7 @@ def get_user_selections():
# Step 7: Provider-specific thinking configuration
thinking_level = None
reasoning_effort = None
anthropic_effort = None
provider_lower = selected_llm_provider.lower()
if provider_lower == "google":
@@ -574,6 +575,14 @@ def get_user_selections():
)
)
reasoning_effort = ask_openai_reasoning_effort()
elif provider_lower == "anthropic":
console.print(
create_question_box(
"Step 7: Effort Level",
"Configure Claude effort level"
)
)
anthropic_effort = ask_anthropic_effort()
return {
"ticker": selected_ticker,
@@ -586,6 +595,7 @@ def get_user_selections():
"deep_thinker": selected_deep_thinker,
"google_thinking_level": thinking_level,
"openai_reasoning_effort": reasoning_effort,
"anthropic_effort": anthropic_effort,
}
@@ -911,6 +921,7 @@ def run_analysis():
# Provider-specific thinking configuration
config["google_thinking_level"] = selections.get("google_thinking_level")
config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
config["anthropic_effort"] = selections.get("anthropic_effort")
# Create stats callback handler for tracking LLM/tool calls
stats_handler = StatsCallbackHandler()

View File

@@ -311,6 +311,26 @@ def ask_openai_reasoning_effort() -> str:
).ask()
def ask_anthropic_effort() -> str | None:
    """Prompt the user to pick an Anthropic effort level.

    The selected level ("high", "medium", or "low") tunes how many
    tokens Claude 4.5+ and 4.6 models spend on a response and therefore
    how thorough it is.

    Returns:
        The chosen effort level string, or None if the prompt is
        cancelled (e.g. via Ctrl-C).
    """
    # Display labels map to the canonical effort-level values the
    # Anthropic API expects.
    effort_choices = [
        questionary.Choice("High (recommended)", "high"),
        questionary.Choice("Medium (balanced)", "medium"),
        questionary.Choice("Low (faster, cheaper)", "low"),
    ]
    # Cyan highlight style, matching the other provider prompts in this file.
    prompt_style = questionary.Style([
        ("selected", "fg:cyan noinherit"),
        ("highlighted", "fg:cyan noinherit"),
        ("pointer", "fg:cyan noinherit"),
    ])
    return questionary.select(
        "Select Effort Level:",
        choices=effort_choices,
        style=prompt_style,
    ).ask()
def ask_gemini_thinking_config() -> str | None:
"""Ask for Gemini thinking configuration.