Skip to content

AI Refinement#

Configuration#

flet_pkg.core.ai.config #

AI refinement configuration.

Loads settings from CLI flags, environment variables, or provider defaults. Priority: CLI flags > env vars > provider defaults.

AIConfig dataclass #

AIConfig(provider: str = 'ollama', model: str = '', api_key: str = '', base_url: str = '', temperature: float = 0.1, max_tokens: int = 4096)

Configuration for the AI refinement pipeline.

load classmethod #

load(provider: str | None = None, model: str | None = None) -> AIConfig

Load configuration from CLI flags and environment variables.

Parameters:

Name Type Description Default
provider str | None

Provider name (anthropic, openai, google, ollama).

None
model str | None

Model name override.

None

Returns:

Type Description
AIConfig

Populated AIConfig instance.

Source code in src/flet_pkg/core/ai/config.py
@classmethod
def load(
    cls,
    provider: str | None = None,
    model: str | None = None,
) -> AIConfig:
    """Build an AIConfig from CLI flags and environment variables.

    Resolution order for each setting: explicit argument, then the
    matching ``FLET_PKG_AI_*`` environment variable, then the
    provider default.

    Args:
        provider: Provider name (anthropic, openai, google, ollama).
        model: Model name override.

    Returns:
        Populated AIConfig instance.
    """
    env = os.environ

    # Resolve the provider; anything unrecognised falls back to ollama.
    chosen = provider or env.get("FLET_PKG_AI_PROVIDER", "ollama")
    if chosen not in _PROVIDER_DEFAULTS:
        chosen = "ollama"

    fallback_model, key_var = _PROVIDER_DEFAULTS[chosen]

    # Model: CLI flag wins, then the env var, then the provider default.
    picked_model = model or env.get("FLET_PKG_AI_MODEL", "") or fallback_model

    # API key comes from the provider-specific env var, when one exists.
    key = env.get(key_var, "") if key_var else ""

    # Ollama is the only provider that needs an explicit base URL.
    url = (
        env.get("OLLAMA_BASE_URL", "http://localhost:11434/v1")
        if chosen == "ollama"
        else ""
    )

    return cls(
        provider=chosen,
        model=picked_model,
        api_key=key,
        base_url=url,
    )

is_available #

is_available() -> bool

Check if the provider is configured and usable.

Returns:

Type Description
bool

True if the provider can be used (Ollama always, others need API key).

Source code in src/flet_pkg/core/ai/config.py
def is_available(self) -> bool:
    """Check if the provider is configured and usable.

    Returns:
        ``True`` if the provider can be used (Ollama always, others need API key).
    """
    # Ollama runs locally and never needs credentials; every other
    # provider is usable only once an API key has been supplied.
    return self.provider == "ollama" or bool(self.api_key)

list_ollama_models #

list_ollama_models() -> list[str]

Query the Ollama API for locally installed models.

Returns a list of model names (e.g. ["qwen2.5-coder:7b", "llama3:8b"]). Returns an empty list if Ollama is unreachable.

Source code in src/flet_pkg/core/ai/config.py
def list_ollama_models() -> list[str]:
    """Query the Ollama API for locally installed models.

    Returns a list of model names (e.g. ``["qwen2.5-coder:7b", "llama3:8b"]``).
    Returns an empty list if Ollama is unreachable.
    """
    import httpx

    # The tags endpoint lives on the plain base URL, so drop any
    # OpenAI-compatibility /v1 suffix before building the request.
    root = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
    root = root.removesuffix("/v1")

    try:
        response = httpx.get(f"{root}/api/tags", timeout=5.0)
        response.raise_for_status()
        payload = response.json()
        return [entry["name"] for entry in payload.get("models", [])]
    except Exception:
        # Best effort: any failure (connection, HTTP error, bad payload)
        # is treated as "no local models available".
        return []

Gap Analyzer#

flet_pkg.core.ai.gap_analyzer #

Deterministic coverage gap analyzer.

Compares DartPackageAPI (source) against GenerationPlan (output) to identify exactly what the pipeline missed and why. Produces a structured GapReport — no LLM required.

GapAnalyzer #

Deterministic gap analysis between Dart source and generated plan.

analyze #

analyze(api: DartPackageAPI, plan: GenerationPlan, extension_type: str) -> GapReport

Analyze gaps between Dart API surface and generated plan.

Parameters:

Name Type Description Default
api DartPackageAPI

Parsed Dart package API.

required
plan GenerationPlan

Generated plan to compare against.

required
extension_type str

Either "service" or "ui_control".

required

Returns:

Type Description
GapReport

A GapReport with coverage stats and missing items.

Source code in src/flet_pkg/core/ai/gap_analyzer.py
def analyze(
    self,
    api: DartPackageAPI,
    plan: GenerationPlan,
    extension_type: str,
) -> GapReport:
    """Analyze gaps between Dart API surface and generated plan.

    Args:
        api: Parsed Dart package API.
        plan: Generated plan to compare against.
        extension_type: Either ``"service"`` or ``"ui_control"``.

    Returns:
        A ``GapReport`` with coverage stats and missing items.
    """
    # Dispatch on extension type; anything other than "service"
    # is handled by the UI-control analyzer.
    handler = (
        self._analyze_service_gaps
        if extension_type == "service"
        else self._analyze_ui_control_gaps
    )
    return handler(api, plan)

Models#

flet_pkg.core.ai.models #

Data models for the AI refinement pipeline.

Uses Pydantic BaseModel for structured LLM output validation. All models are importable without pydantic-ai installed (only pydantic needed at runtime when AI features are used).

GapKind #

Bases: str, Enum

Classification of a coverage gap.

GapItem dataclass #

GapItem(kind: GapKind, dart_name: str, dart_type: str = '', dart_class: str = '', reason: str = '', feasible: bool = True, context: str = '')

A single coverage gap between Dart source and generated code.

GapReport dataclass #

GapReport(flutter_package: str, extension_type: str, total_dart_api: int = 0, total_generated: int = 0, coverage_pct: float = 0.0, gaps: list[GapItem] = list(), category_counts: dict[str, tuple[int, int]] = dict())

Structured report of coverage gaps (deterministic output).

category_counts class-attribute instance-attribute #

category_counts: dict[str, tuple[int, int]] = field(default_factory=dict)

Per-category (dart_api_count, generated_count) — e.g. {"Methods": (25, 24)}.

ImprovementSuggestion dataclass #

ImprovementSuggestion(target_file: str, description: str, gap_refs: list[int] = list(), priority: int = 1)

A single improvement suggested by the Architect.

ArchitectPlan dataclass #

ArchitectPlan(analysis: str, suggestions: list[ImprovementSuggestion] = list(), files_to_skip: list[str] = list())

The Architect's analysis and improvement plan.

FileEdit dataclass #

FileEdit(filename: str, search: str, replace: str, rationale: str = '')

A precise search/replace edit to apply to a file.

EditorResult dataclass #

EditorResult(edits: list[FileEdit] = list(), summary: str = '')

Collection of edits from the Editor agent.

RefinementResult dataclass #

RefinementResult(gap_report: GapReport, architect_plan: ArchitectPlan | None = None, edits_applied: int = 0, edits_failed: int = 0, validation_passed: bool = False, overall_assessment: str = '', input_tokens: int = 0, output_tokens: int = 0, file_diffs: list[tuple[str, str]] = list(), pending_suggestions: list[ImprovementSuggestion] = list())

Final output of the AI refinement pipeline.

file_diffs class-attribute instance-attribute #

file_diffs: list[tuple[str, str]] = field(default_factory=list)

(filename, unified_diff_text) for files modified by AI.

pending_suggestions class-attribute instance-attribute #

pending_suggestions: list[ImprovementSuggestion] = field(default_factory=list)

Architect suggestions that the Editor did not apply.

Refiner#

flet_pkg.core.ai.refiner #

AI refinement orchestrator.

Coordinates the four-step pipeline: 1. Gap Report (deterministic) 2. Architect (LLM — reasons about improvements) 3. Editor (LLM — produces search/replace edits) 4. Validator (deterministic — syntax check + retry loop)

AIRefiner #

AIRefiner(config: AIConfig)

Orchestrates AI-powered code refinement.

Uses the Architect/Editor pattern: - Architect reasons about WHAT to fix (structured plan) - Editor produces HOW to fix it (search/replace edits) - Validator checks syntax and retries on failure

Initialise the refiner with an AI configuration.

Parameters:

Name Type Description Default
config AIConfig

Provider and model settings for the LLM.

required
Source code in src/flet_pkg/core/ai/refiner.py
def __init__(self, config: AIConfig):
    """Initialise the refiner with an AI configuration.

    Args:
        config: Provider and model settings for the LLM.
    """
    # The pydantic-ai model object is constructed lazily, on first use.
    self._model: Any = None
    self.config = config

refine #

refine(api: DartPackageAPI, plan: GenerationPlan, generated_files: dict[str, str], extension_type: str, package_path: Path | None = None, verbose: bool = False) -> RefinementResult

Run the full AI refinement pipeline.

Parameters:

Name Type Description Default
api DartPackageAPI

Parsed Dart package API.

required
plan GenerationPlan

Generation plan from the analyzer.

required
generated_files dict[str, str]

Dict of filename → content from generators.

required
extension_type str

"service" or "ui_control".

required
package_path Path | None

Path to the downloaded Flutter package source.

None
verbose bool

If True, print detailed progress at each step.

False

Returns:

Type Description
RefinementResult

RefinementResult with gap report, edits applied, and validation status.

Source code in src/flet_pkg/core/ai/refiner.py
def refine(
    self,
    api: DartPackageAPI,
    plan: GenerationPlan,
    generated_files: dict[str, str],
    extension_type: str,
    package_path: Path | None = None,
    verbose: bool = False,
) -> RefinementResult:
    """Run the full AI refinement pipeline.

    Args:
        api: Parsed Dart package API.
        plan: Generation plan from the analyzer.
        generated_files: Dict of filename → content from generators.
        extension_type: "service" or "ui_control".
        package_path: Path to the downloaded Flutter package source.
        verbose: If True, print detailed progress at each step.

    Returns:
        RefinementResult with gap report, edits applied, and validation status.
    """
    # The pipeline is implemented as a coroutine; drive it to completion
    # on a fresh event loop so callers stay fully synchronous.
    pipeline = self._refine_async(
        api, plan, generated_files, extension_type, package_path, verbose
    )
    return asyncio.run(pipeline)