feat: AI code generation — LLM generates fix actions from findings

This commit is contained in:
Yuriy 2026-02-12 09:16:21 +03:00
parent 2be548d078
commit af3c81b04d
5 changed files with 249 additions and 3 deletions

View File

@ -0,0 +1,134 @@
use crate::types::{Action, ActionKind};
use serde::{Deserialize, Serialize};
/// Payload of the `generate_ai_actions` Tauri command. Built by the
/// frontend `generateAiActions` helper, which already sends snake_case
/// keys, so no serde renaming is needed.
#[derive(Debug, Serialize, Deserialize)]
pub struct GenerateActionsRequest {
// LLM provider id; "ollama" is the only provider accepted without an API key.
pub provider: String,
// Model name, passed through verbatim to the provider.
pub model: String,
// Required unless provider == "ollama".
pub api_key: Option<String>,
// Optional override of the provider's base URL.
pub base_url: Option<String>,
pub context: String, // llm_context JSON
pub findings_json: String, // findings array JSON
// Root path of the analyzed project; embedded into the prompt so the LLM
// emits paths relative to it.
pub project_path: String,
// Response token cap; defaults to 4096 when None.
pub max_tokens: Option<u32>,
}
/// Result returned to the frontend. Recoverable failures (missing key,
/// LLM error, unparsable reply) come back as `ok == false` with `error`
/// set, rather than as a Rust `Err`.
#[derive(Debug, Serialize, Deserialize)]
pub struct GenerateActionsResponse {
// False when the API key is missing, the LLM call failed, or its reply did not parse.
pub ok: bool,
// Fix actions parsed from the LLM reply; empty on failure.
pub actions: Vec<Action>,
// Human-readable summary produced by the LLM; empty on failure.
pub explanation: String,
// Populated only when `ok` is false.
pub error: Option<String>,
}
/// Shape of the JSON object the prompt instructs the LLM to return
/// (an "actions" array plus a short "explanation" string).
#[derive(Debug, Deserialize)]
struct LlmActionsOutput {
actions: Vec<LlmAction>,
explanation: String,
}
/// One action as emitted by the LLM; converted into `crate::types::Action`.
/// Entries with an unrecognised `kind` are silently dropped during conversion.
#[derive(Debug, Deserialize)]
struct LlmAction {
// Prefixed with "ai-" when converted into an `Action` id.
id: String,
title: String,
description: String,
kind: String, // "create_file" | "update_file" | "create_dir" ("delete_file" is also accepted by the converter)
// Path relative to the project root, per the prompt's instructions.
path: String,
// NOTE(review): presumably omitted for "create_dir" actions — confirm.
content: Option<String>,
}
/// Tauri command: asks the configured LLM to turn analyzer findings into
/// concrete fix actions (file/dir creations and updates).
///
/// Recoverable failures (missing API key, LLM-side error, unparsable reply)
/// are reported as `Ok` with `ok: false` and an `error` message; `Err` only
/// propagates transport failures bubbled up from `ask_llm`.
#[tauri::command]
pub async fn generate_ai_actions(
    request: GenerateActionsRequest,
) -> Result<GenerateActionsResponse, String> {
    // Every provider except a local Ollama instance needs an API key.
    let api_key = request.api_key.clone().unwrap_or_default();
    if api_key.is_empty() && request.provider != "ollama" {
        return Ok(GenerateActionsResponse {
            ok: false,
            actions: vec![],
            explanation: String::new(),
            error: Some("API-ключ не указан.".into()),
        });
    }

    // The prompt demands bare JSON without markdown fences; fences are still
    // stripped defensively below because models frequently add them anyway.
    let user_prompt = format!(
        "Ты — PAPA YU, AI-аудитор проектов. На основе контекста и списка найденных проблем сгенерируй конкретные действия для исправления.\n\nВАЖНО: Отвечай ТОЛЬКО валидным JSON без markdown-обёртки. Формат:\n{{\n \"actions\": [\n {{\n \"id\": \"уникальный-id\",\n \"title\": \"Краткое название\",\n \"description\": \"Что делает\",\n \"kind\": \"create_file\",\n \"path\": \"путь/к/файлу\",\n \"content\": \"содержимое\"\n }}\n ],\n \"explanation\": \"Краткое объяснение\"\n}}\n\nДопустимые kind: \"create_file\", \"update_file\", \"create_dir\"\nПуть — относительный от корня проекта. Не более 10 действий.\nПуть проекта: {}\n\nПроблемы:\n{}",
        request.project_path,
        request.findings_json
    );

    let llm_request = super::ask_llm::LlmRequest {
        provider: request.provider,
        model: request.model,
        api_key: request.api_key,
        base_url: request.base_url,
        context: request.context,
        prompt: user_prompt,
        max_tokens: request.max_tokens.or(Some(4096)),
    };
    let llm_response = super::ask_llm::ask_llm(llm_request).await?;
    if !llm_response.ok {
        return Ok(GenerateActionsResponse {
            ok: false,
            actions: vec![],
            explanation: String::new(),
            error: llm_response.error,
        });
    }

    // Parse the JSON object out of the LLM reply.
    let json_str = strip_code_fences(llm_response.content.trim());
    match serde_json::from_str::<LlmActionsOutput>(json_str) {
        Ok(output) => {
            // Keep only actions with a recognised kind; anything else is dropped.
            let actions: Vec<Action> = output
                .actions
                .into_iter()
                .filter_map(|a| {
                    let kind = match a.kind.as_str() {
                        "create_file" => ActionKind::CreateFile,
                        "update_file" => ActionKind::UpdateFile,
                        "create_dir" => ActionKind::CreateDir,
                        // Accepted defensively, although the prompt does not advertise it.
                        "delete_file" => ActionKind::DeleteFile,
                        _ => return None,
                    };
                    Some(Action {
                        // "ai-" prefix marks AI-generated actions and avoids id
                        // collisions with the analyzer's own actions.
                        id: format!("ai-{}", a.id),
                        title: a.title,
                        description: a.description,
                        kind,
                        path: a.path,
                        content: a.content,
                    })
                })
                .collect();
            Ok(GenerateActionsResponse {
                ok: true,
                actions,
                explanation: output.explanation,
                error: None,
            })
        }
        Err(e) => Ok(GenerateActionsResponse {
            ok: false,
            actions: vec![],
            explanation: String::new(),
            error: Some(format!(
                "Ошибка парсинга ответа LLM: {}. Ответ: {}",
                e,
                // Char-boundary-safe truncation: a plain byte slice here could
                // panic mid-character (the reply is often Cyrillic).
                truncate_utf8(json_str, 200)
            )),
        }),
    }
}

/// Strips an optional leading ```json / ``` fence and an optional trailing
/// ``` fence. Fixes the original chained `unwrap_or(&content)` fallback,
/// which reinstated the *whole* fenced string — opening fence included —
/// whenever the closing fence was missing, guaranteeing a parse failure.
fn strip_code_fences(s: &str) -> &str {
    let body = s
        .strip_prefix("```json")
        .or_else(|| s.strip_prefix("```"))
        .unwrap_or(s);
    body.strip_suffix("```").unwrap_or(body).trim()
}

/// Truncates `s` to at most `max` bytes without splitting a UTF-8 character.
/// Direct slicing (`&s[..max]`) panics when `max` lands inside a multi-byte
/// character, which the original error-reporting path could hit.
fn truncate_utf8(s: &str, max: usize) -> &str {
    if s.len() <= max {
        return s;
    }
    let mut end = max;
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    &s[..end]
}

View File

@ -1,6 +1,7 @@
mod analyze_project;
mod apply_actions;
mod ask_llm;
pub mod ask_llm;
mod generate_ai_actions;
mod get_app_info;
mod preview_actions;
mod undo_last;
@ -8,6 +9,7 @@ mod undo_last;
pub use analyze_project::analyze_project;
pub use apply_actions::apply_actions;
pub use ask_llm::ask_llm;
pub use generate_ai_actions::generate_ai_actions;
pub use get_app_info::get_app_info;
pub use preview_actions::preview_actions;
pub use undo_last::undo_last;

View File

@ -1,7 +1,7 @@
mod commands;
mod types;
use commands::{analyze_project, apply_actions, ask_llm, get_app_info, preview_actions, undo_last};
use commands::{analyze_project, apply_actions, ask_llm, generate_ai_actions, get_app_info, preview_actions, undo_last};
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
@ -26,6 +26,7 @@ pub fn run() {
undo_last,
get_app_info,
ask_llm,
generate_ai_actions,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

View File

@ -187,3 +187,30 @@ export async function askLlm(
},
});
}
// ---- AI Code Generation ----
/** Result of the backend `generate_ai_actions` command: on success `ok` is
 * true and `actions` holds LLM-generated fix actions; on failure `ok` is
 * false and `error` carries the message. */
export interface GenerateActionsResponse {
ok: boolean;
actions: Action[];
explanation: string;
error?: string | null;
}
/**
 * Invokes the backend `generate_ai_actions` command, serializing the
 * report's LLM context and findings to JSON strings as the Rust side
 * expects (snake_case keys, nullable key/URL).
 */
export async function generateAiActions(
  settings: LlmSettings,
  report: AnalyzeReport,
): Promise<GenerateActionsResponse> {
  // Assemble the snake_case payload consumed by the Rust command.
  const request = {
    provider: settings.provider,
    model: settings.model,
    api_key: settings.apiKey || null,
    base_url: settings.baseUrl || null,
    context: JSON.stringify(report.llm_context),
    findings_json: JSON.stringify(report.findings),
    project_path: report.path,
    max_tokens: 4096,
  };
  return invoke<GenerateActionsResponse>('generate_ai_actions', { request });
}

View File

@ -19,7 +19,7 @@ import {
X,
} from 'lucide-react';
import { invoke } from '@tauri-apps/api/core';
import { analyzeProject, askLlm, type AnalyzeReport, type Action, type ApplyResult, type UndoResult, type PreviewResult, type DiffItem, type LlmSettings, DEFAULT_LLM_SETTINGS } from '../lib/analyze';
import { analyzeProject, askLlm, generateAiActions, type AnalyzeReport, type Action, type ApplyResult, type UndoResult, type PreviewResult, type DiffItem, type LlmSettings, DEFAULT_LLM_SETTINGS } from '../lib/analyze';
import { animateFadeInUp } from '../lib/anime-utils';
import { useAppStore } from '../store/app-store';
@ -121,6 +121,73 @@ export function Tasks() {
setIsAiAnalyzing(false);
};
// True while an AI fix-generation request is in flight; disables the trigger button.
const [isGeneratingActions, setIsGeneratingActions] = useState(false);
// Sends the report's findings to the backend `generate_ai_actions` command,
// merges any returned actions into the report, pre-selects them, and posts
// progress/result messages into the chat.
const handleAiCodeGen = async (report: AnalyzeReport) => {
const settings = loadLlmSettings();
// Mirrors the backend check: every provider except local Ollama needs a key.
if (!settings.apiKey && settings.provider !== 'ollama') {
setMessages((prev) => [
...prev,
{ role: 'system', text: '⚠️ API-ключ не настроен. Перейдите в Настройки LLM.' },
]);
return;
}
setIsGeneratingActions(true);
setMessages((prev) => [...prev, { role: 'system', text: '🔧 AI генерирует исправления...' }]);
try {
const resp = await generateAiActions(settings, report);
if (resp.ok && resp.actions.length > 0) {
// Merge AI actions into the report
const updatedReport = {
...report,
actions: [...(report.actions ?? []), ...resp.actions],
};
setLastReport(updatedReport);
storeSetLastReport(updatedReport, report.path);
// Init selection for new actions
// New AI actions default to selected; existing selections are preserved.
const newSelection: Record<string, boolean> = { ...selectedActions };
resp.actions.forEach((a) => { newSelection[a.id] = true; });
setSelectedActions(newSelection);
// Update the last assistant message with new report
setMessages((prev) => {
const updated = [...prev];
// Find the last assistant message with this report and update it
// (scan backwards so the most recent report message wins).
for (let i = updated.length - 1; i >= 0; i--) {
const msg = updated[i];
if ('report' in msg && msg.report.path === report.path) {
updated[i] = { ...msg, report: updatedReport };
break;
}
}
return [
...updated,
{ role: 'assistant', text: `🔧 **AI сгенерировал ${resp.actions.length} исправлений** (${settings.model}):\n\n${resp.explanation}` },
];
});
} else if (resp.ok && resp.actions.length === 0) {
// Successful call but nothing to fix.
setMessages((prev) => [
...prev,
{ role: 'system', text: '✓ AI не нашёл дополнительных исправлений — проект в хорошем состоянии.' },
]);
} else {
// Backend-reported failure (ok === false).
setMessages((prev) => [
...prev,
{ role: 'system', text: `❌ Ошибка генерации: ${resp.error}` },
]);
}
} catch (e) {
// invoke() rejection (transport-level failure).
setMessages((prev) => [
...prev,
{ role: 'system', text: `❌ Ошибка: ${e}` },
]);
}
// Reached on both success and failure paths, so the spinner always clears.
setIsGeneratingActions(false);
};
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}, [messages]);
@ -605,6 +672,8 @@ export function Tasks() {
onUndo={handleUndoLast}
onAiAnalysis={handleAiAnalysis}
isAiAnalyzing={isAiAnalyzing}
onAiCodeGen={handleAiCodeGen}
isGeneratingActions={isGeneratingActions}
/>
)}
</div>
@ -803,6 +872,8 @@ function ReportBlock({
onUndo: (projectPath: string) => void;
onAiAnalysis?: (report: AnalyzeReport) => void;
isAiAnalyzing?: boolean;
onAiCodeGen?: (report: AnalyzeReport) => void;
isGeneratingActions?: boolean;
}) {
if (error) {
return <div className="text-sm text-destructive">Ошибка: {error}</div>;
@ -948,6 +1019,17 @@ function ReportBlock({
{isAiAnalyzing ? 'AI анализирует...' : 'AI Анализ'}
</button>
)}
{isCurrentReport && onAiCodeGen && (
<button
type="button"
onClick={() => onAiCodeGen(r)}
disabled={isGeneratingActions}
className="inline-flex items-center gap-2 px-3 py-1.5 rounded-lg border bg-green-600 text-white text-sm font-medium hover:opacity-90 disabled:opacity-50"
>
<RefreshCw className={`w-4 h-4 ${isGeneratingActions ? 'animate-spin' : ''}`} />
{isGeneratingActions ? 'Генерирую...' : 'AI Исправления'}
</button>
)}
<button
type="button"
onClick={() => onDownload(r)}