mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-05-15 00:48:39 +08:00
docs: salvage focused stale PR contributions
- add Vite and Redis pattern skills from closed stale PRs
- add frontend-slides support assets
- port skill-comply runner fixes and LLM prompt/provider regressions
- harden agent frontmatter validation and sync catalog counts
This commit is contained in:
committed by
Affaan Mustafa
parent
d8f879e671
commit
b39d2244cf
@@ -1,13 +1,23 @@
|
||||
"""Prompt module for prompt building and normalization."""
|
||||
|
||||
from llm.prompt.builder import PromptBuilder, adapt_messages_for_provider, get_provider_builder
|
||||
from llm.prompt.templates import TEMPLATES, get_template, get_template_or_default
|
||||
from llm.prompt.templates import (
|
||||
TEMPLATES,
|
||||
clear_templates,
|
||||
deregister_template,
|
||||
get_template,
|
||||
get_template_or_default,
|
||||
register_template,
|
||||
)
|
||||
|
||||
# Public API of the prompt package. Kept alphabetical (uppercase names
# first, per ASCII order) so additions diff cleanly and the list stays
# easy to compare against the imports above.
__all__ = (
    "PromptBuilder",
    "TEMPLATES",
    "adapt_messages_for_provider",
    "clear_templates",
    "deregister_template",
    "get_provider_builder",
    "get_template",
    "get_template_or_default",
    "register_template",
)
|
||||
|
||||
@@ -19,9 +19,32 @@ class PromptConfig:
|
||||
tool_format: str = "native"
|
||||
|
||||
|
||||
class PromptBuilder:
|
||||
def __init__(self, config: PromptConfig | None = None) -> None:
|
||||
self.config = config or PromptConfig()
|
||||
class PromptBuilder:
|
||||
def __init__(
    self,
    config: PromptConfig | None = None,
    *,
    system_template: str | None = None,
    user_template: str | None = None,
    include_tools_in_system: bool | None = None,
    tool_format: str | None = None,
) -> None:
    """Create a builder from an explicit config or from keyword overrides.

    Exactly one configuration style may be used: either a ready-made
    ``PromptConfig`` instance, or individual keyword options that are
    folded into a fresh ``PromptConfig``. Supplying a config together
    with any keyword option raises ``ValueError``.
    """
    # Collect only the keyword options the caller actually supplied;
    # ``is not None`` (not truthiness) so explicit False/"" survive.
    overrides = {
        "system_template": system_template,
        "user_template": user_template,
        "include_tools_in_system": include_tools_in_system,
        "tool_format": tool_format,
    }
    supplied = {key: value for key, value in overrides.items() if value is not None}

    if config is not None:
        if supplied:
            raise ValueError("Pass either config or PromptBuilder keyword options, not both")
        self.config = config
    else:
        # Unset options fall back to PromptConfig's own defaults.
        self.config = PromptConfig(**supplied)
|
||||
|
||||
def build(self, messages: list[Message], tools: list[ToolDefinition] | None = None) -> list[Message]:
|
||||
if not messages:
|
||||
|
||||
@@ -1 +1,41 @@
|
||||
# Templates module for provider-specific prompt templates
|
||||
"""Provider-specific prompt template helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# Backing store for named prompt templates; mutate only via the helpers below.
_TEMPLATE_REGISTRY: dict[str, str] = {}

# Public, live view of the registry: the helpers mutate the same dict
# object, so callers holding TEMPLATES always see the current contents.
TEMPLATES = _TEMPLATE_REGISTRY


def _validate_template_input(name: str, template: str | None = None) -> None:
    """Reject blank or non-string registry keys/values before any mutation."""
    name_ok = isinstance(name, str) and bool(name.strip())
    if not name_ok:
        raise ValueError("Template name must be a non-empty string")
    if template is None:
        return
    template_ok = isinstance(template, str) and bool(template.strip())
    if not template_ok:
        raise ValueError("Template content must be a non-empty string")


def register_template(name: str, template: str) -> None:
    """Store *template* under *name*, replacing any previous entry."""
    _validate_template_input(name, template)
    _TEMPLATE_REGISTRY[name] = template


def deregister_template(name: str) -> None:
    """Drop the entry for *name*; removing an unregistered name is a no-op."""
    _validate_template_input(name)
    _TEMPLATE_REGISTRY.pop(name, None)


def clear_templates() -> None:
    """Empty the registry in place (TEMPLATES stays the same dict object)."""
    _TEMPLATE_REGISTRY.clear()


def get_template(name: str) -> str | None:
    """Look up *name*, returning ``None`` when it is not registered."""
    return _TEMPLATE_REGISTRY.get(name)


def get_template_or_default(name: str, default: str = "") -> str:
    """Look up *name*, falling back to *default* when it is absent."""
    return _TEMPLATE_REGISTRY.get(name, default)
|
||||
|
||||
@@ -57,27 +57,39 @@ class ClaudeProvider(LLMProvider):
|
||||
}
|
||||
if input.max_tokens:
|
||||
params["max_tokens"] = input.max_tokens
|
||||
else:
|
||||
params["max_tokens"] = 8192 # required by Anthropic API
|
||||
if input.tools:
|
||||
else:
|
||||
params["max_tokens"] = 8192 # required by Anthropic API
|
||||
if input.tools:
|
||||
params["tools"] = [tool.to_anthropic_tool() for tool in input.tools]
|
||||
|
||||
response = self.client.messages.create(**params)
|
||||
|
||||
tool_calls = None
|
||||
if response.content and hasattr(response.content[0], "type"):
|
||||
if response.content[0].type == "tool_use":
|
||||
tool_calls = [
|
||||
ToolCall(
|
||||
id=getattr(response.content[0], "id", ""),
|
||||
name=getattr(response.content[0], "name", ""),
|
||||
arguments=getattr(response.content[0].input, "__dict__", {}),
|
||||
)
|
||||
]
|
||||
|
||||
return LLMOutput(
|
||||
content=response.content[0].text if response.content else "",
|
||||
tool_calls=tool_calls,
|
||||
text_parts: list[str] = []
|
||||
tool_calls: list[ToolCall] = []
|
||||
for block in response.content or []:
|
||||
block_type = getattr(block, "type", None)
|
||||
if block_type == "text":
|
||||
text = getattr(block, "text", "")
|
||||
if text:
|
||||
text_parts.append(text)
|
||||
elif block_type == "tool_use":
|
||||
raw_arguments = getattr(block, "input", {})
|
||||
arguments = (
|
||||
raw_arguments.copy()
|
||||
if isinstance(raw_arguments, dict)
|
||||
else getattr(raw_arguments, "__dict__", {}).copy()
|
||||
)
|
||||
tool_calls.append(
|
||||
ToolCall(
|
||||
id=getattr(block, "id", ""),
|
||||
name=getattr(block, "name", ""),
|
||||
arguments=arguments,
|
||||
)
|
||||
)
|
||||
|
||||
return LLMOutput(
|
||||
content="".join(text_parts),
|
||||
tool_calls=tool_calls or None,
|
||||
model=response.model,
|
||||
usage={
|
||||
"input_tokens": response.usage.input_tokens,
|
||||
|
||||
Reference in New Issue
Block a user