diff --git a/desktop/src-tauri/src/commands/generate_ai_actions.rs b/desktop/src-tauri/src/commands/generate_ai_actions.rs
new file mode 100644
index 0000000..728d5b7
--- /dev/null
+++ b/desktop/src-tauri/src/commands/generate_ai_actions.rs
@@ -0,0 +1,134 @@
+use crate::types::{Action, ActionKind};
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GenerateActionsRequest {
+    pub provider: String,
+    pub model: String,
+    pub api_key: Option<String>,
+    pub base_url: Option<String>,
+    pub context: String,       // llm_context JSON
+    pub findings_json: String, // findings array JSON
+    pub project_path: String,
+    pub max_tokens: Option<u32>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GenerateActionsResponse {
+    pub ok: bool,
+    pub actions: Vec<Action>,
+    pub explanation: String,
+    pub error: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct LlmActionsOutput {
+    actions: Vec<LlmAction>,
+    explanation: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct LlmAction {
+    id: String,
+    title: String,
+    description: String,
+    kind: String, // "create_file" | "update_file" | "create_dir"
+    path: String,
+    content: Option<String>,
+}
+
+#[tauri::command]
+pub async fn generate_ai_actions(
+    request: GenerateActionsRequest,
+) -> Result<GenerateActionsResponse, String> {
+    let api_key = request.api_key.clone().unwrap_or_default();
+    if api_key.is_empty() && request.provider != "ollama" {
+        return Ok(GenerateActionsResponse {
+            ok: false,
+            actions: vec![],
+            explanation: String::new(),
+            error: Some("API-ключ не указан.".into()),
+        });
+    }
+
+    let user_prompt = format!(
+        "Ты — PAPA YU, AI-аудитор проектов. На основе контекста и списка найденных проблем сгенерируй конкретные действия для исправления.\n\nВАЖНО: Отвечай ТОЛЬКО валидным JSON без markdown-обёртки. Формат:\n{{\n  \"actions\": [\n    {{\n      \"id\": \"уникальный-id\",\n      \"title\": \"Краткое название\",\n      \"description\": \"Что делает\",\n      \"kind\": \"create_file\",\n      \"path\": \"путь/к/файлу\",\n      \"content\": \"содержимое\"\n    }}\n  ],\n  \"explanation\": \"Краткое объяснение\"\n}}\n\nДопустимые kind: \"create_file\", \"update_file\", \"create_dir\"\nПуть — относительный от корня проекта. Не более 10 действий.\nПуть проекта: {}\n\nПроблемы:\n{}",
+        request.project_path,
+        request.findings_json
+    );
+
+    let llm_request = super::ask_llm::LlmRequest {
+        provider: request.provider,
+        model: request.model,
+        api_key: request.api_key,
+        base_url: request.base_url,
+        context: request.context,
+        prompt: user_prompt,
+        max_tokens: request.max_tokens.or(Some(4096)),
+    };
+
+    let llm_response = super::ask_llm::ask_llm(llm_request).await?;
+
+    if !llm_response.ok {
+        return Ok(GenerateActionsResponse {
+            ok: false,
+            actions: vec![],
+            explanation: String::new(),
+            error: llm_response.error,
+        });
+    }
+
+    // Parse JSON from the LLM response.
+    let content = llm_response.content.trim();
+    // Strip markdown code fences if present. Keep the prefix-stripped slice
+    // around so a missing closing fence doesn't fall back to the fenced text.
+    let without_prefix = content
+        .strip_prefix("```json")
+        .or_else(|| content.strip_prefix("```"))
+        .unwrap_or(content);
+    let json_str = without_prefix
+        .strip_suffix("```")
+        .unwrap_or(without_prefix)
+        .trim();
+
+    match serde_json::from_str::<LlmActionsOutput>(json_str) {
+        Ok(output) => {
+            let actions: Vec<Action> = output
+                .actions
+                .into_iter()
+                .filter_map(|a| {
+                    let kind = match a.kind.as_str() {
+                        "create_file" => ActionKind::CreateFile,
+                        "update_file" => ActionKind::UpdateFile,
+                        "create_dir" => ActionKind::CreateDir,
+                        "delete_file" => ActionKind::DeleteFile,
+                        _ => return None,
+                    };
+                    Some(Action {
+                        id: format!("ai-{}", a.id),
+                        title: a.title,
+                        description: a.description,
+                        kind,
+                        path: a.path,
+                        content: a.content,
+                    })
+                })
+                .collect();
+
+            Ok(GenerateActionsResponse {
+                ok: true,
+                actions,
+                explanation: output.explanation,
+                error: None,
+            })
+        }
+        Err(e) => Ok(GenerateActionsResponse {
+            ok: false,
+            actions: vec![],
+            explanation: String::new(),
+            // Truncate by characters, not bytes: a byte slice at 200 could
+            // split a multi-byte (e.g. Cyrillic) character and panic.
+            error: Some(format!(
+                "Ошибка парсинга ответа LLM: {}. Ответ: {}",
+                e,
+                json_str.chars().take(200).collect::<String>()
+            )),
+        }),
+    }
+}
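For reference, the JSON contract above can be written out in TypeScript. This is a hand-typed illustration of a reply that would survive the parsing in `generate_ai_actions` (field names mirror `LlmActionsOutput`/`LlmAction`; the sample values are invented):

```ts
// Shape of the reply the Rust side deserializes. Note that the matcher also
// accepts "delete_file", even though the prompt only advertises the first three.
interface LlmReply {
  actions: Array<{
    id: string;        // prefixed to "ai-<id>" on the Rust side
    title: string;
    description: string;
    kind: 'create_file' | 'update_file' | 'create_dir' | 'delete_file';
    path: string;      // relative to the project root
    content?: string;  // omitted for create_dir
  }>;
  explanation: string;
}

const sample: LlmReply = {
  actions: [
    {
      id: 'add-readme',
      title: 'Add a README',
      description: 'Creates a minimal README.md at the project root',
      kind: 'create_file',
      path: 'README.md',
      content: '# My Project\n',
    },
  ],
  explanation: 'The project is missing a README.',
};

console.log(JSON.stringify(sample)); // what a well-behaved model should emit
```

An action whose `kind` falls outside these four values is silently dropped by the `filter_map` rather than failing the whole batch.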
diff --git a/desktop/src-tauri/src/commands/mod.rs b/desktop/src-tauri/src/commands/mod.rs
index 4ba7b63..97eae4f 100644
--- a/desktop/src-tauri/src/commands/mod.rs
+++ b/desktop/src-tauri/src/commands/mod.rs
@@ -1,6 +1,7 @@
 mod analyze_project;
 mod apply_actions;
-mod ask_llm;
+pub mod ask_llm;
+mod generate_ai_actions;
 mod get_app_info;
 mod preview_actions;
 mod undo_last;
@@ -8,6 +9,7 @@
 pub use analyze_project::analyze_project;
 pub use apply_actions::apply_actions;
 pub use ask_llm::ask_llm;
+pub use generate_ai_actions::generate_ai_actions;
 pub use get_app_info::get_app_info;
 pub use preview_actions::preview_actions;
 pub use undo_last::undo_last;
diff --git a/desktop/src-tauri/src/lib.rs b/desktop/src-tauri/src/lib.rs
index c4f5717..293fb5b 100644
--- a/desktop/src-tauri/src/lib.rs
+++ b/desktop/src-tauri/src/lib.rs
@@ -1,7 +1,7 @@
 mod commands;
 mod types;
 
-use commands::{analyze_project, apply_actions, ask_llm, get_app_info, preview_actions, undo_last};
+use commands::{analyze_project, apply_actions, ask_llm, generate_ai_actions, get_app_info, preview_actions, undo_last};
 
 #[cfg_attr(mobile, tauri::mobile_entry_point)]
 pub fn run() {
@@ -26,6 +26,7 @@ pub fn run() {
             undo_last,
             get_app_info,
             ask_llm,
+            generate_ai_actions,
         ])
         .run(tauri::generate_context!())
         .expect("error while running tauri application");
diff --git a/desktop/ui/src/lib/analyze.ts b/desktop/ui/src/lib/analyze.ts
index 4392922..31c2d93 100644
--- a/desktop/ui/src/lib/analyze.ts
+++ b/desktop/ui/src/lib/analyze.ts
@@ -187,3 +187,30 @@ export async function askLlm(
     },
   });
 }
+
+// ---- AI Code Generation ----
+
+export interface GenerateActionsResponse {
+  ok: boolean;
+  actions: Action[];
+  explanation: string;
+  error?: string | null;
+}
+
+export async function generateAiActions(
+  settings: LlmSettings,
+  report: AnalyzeReport,
+): Promise<GenerateActionsResponse> {
+  return invoke<GenerateActionsResponse>('generate_ai_actions', {
+    request: {
+      provider: settings.provider,
+      model: settings.model,
+      api_key: settings.apiKey || null,
+      base_url: settings.baseUrl || null,
+      context: JSON.stringify(report.llm_context),
+      findings_json: JSON.stringify(report.findings),
+      project_path: report.path,
+      max_tokens: 4096,
+    },
+  });
+}
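A minimal caller sketch for the new `generateAiActions` wrapper (the import path and the surrounding error handling are illustrative; `LlmSettings` and `AnalyzeReport` are the existing exports used above):

```ts
import { generateAiActions, type AnalyzeReport, type LlmSettings } from './lib/analyze';

// The command resolves with ok: false on model/parse failures, so try/catch
// only guards IPC-level errors from invoke itself.
async function fetchAiFixes(settings: LlmSettings, report: AnalyzeReport) {
  try {
    const resp = await generateAiActions(settings, report);
    if (!resp.ok) {
      console.warn('AI generation failed:', resp.error);
      return [];
    }
    return resp.actions; // ids arrive prefixed with "ai-"
  } catch (e) {
    console.error('invoke failed:', e);
    return [];
  }
}
```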
null; +} + +export async function generateAiActions( + settings: LlmSettings, + report: AnalyzeReport, +): Promise { + return invoke('generate_ai_actions', { + request: { + provider: settings.provider, + model: settings.model, + api_key: settings.apiKey || null, + base_url: settings.baseUrl || null, + context: JSON.stringify(report.llm_context), + findings_json: JSON.stringify(report.findings), + project_path: report.path, + max_tokens: 4096, + }, + }); +} diff --git a/desktop/ui/src/pages/Tasks.tsx b/desktop/ui/src/pages/Tasks.tsx index 5d0b077..fbed860 100644 --- a/desktop/ui/src/pages/Tasks.tsx +++ b/desktop/ui/src/pages/Tasks.tsx @@ -19,7 +19,7 @@ import { X, } from 'lucide-react'; import { invoke } from '@tauri-apps/api/core'; -import { analyzeProject, askLlm, type AnalyzeReport, type Action, type ApplyResult, type UndoResult, type PreviewResult, type DiffItem, type LlmSettings, DEFAULT_LLM_SETTINGS } from '../lib/analyze'; +import { analyzeProject, askLlm, generateAiActions, type AnalyzeReport, type Action, type ApplyResult, type UndoResult, type PreviewResult, type DiffItem, type LlmSettings, DEFAULT_LLM_SETTINGS } from '../lib/analyze'; import { animateFadeInUp } from '../lib/anime-utils'; import { useAppStore } from '../store/app-store'; @@ -121,6 +121,73 @@ export function Tasks() { setIsAiAnalyzing(false); }; + const [isGeneratingActions, setIsGeneratingActions] = useState(false); + + const handleAiCodeGen = async (report: AnalyzeReport) => { + const settings = loadLlmSettings(); + if (!settings.apiKey && settings.provider !== 'ollama') { + setMessages((prev) => [ + ...prev, + { role: 'system', text: '⚠️ API-ключ не настроен. Перейдите в Настройки LLM.' }, + ]); + return; + } + + setIsGeneratingActions(true); + setMessages((prev) => [...prev, { role: 'system', text: '🔧 AI генерирует исправления...' }]); + + try { + const resp = await generateAiActions(settings, report); + if (resp.ok && resp.actions.length > 0) { + // Merge AI actions into the report + const updatedReport = { + ...report, + actions: [...(report.actions ?? []), ...resp.actions], + }; + setLastReport(updatedReport); + storeSetLastReport(updatedReport, report.path); + + // Init selection for new actions + const newSelection: Record = { ...selectedActions }; + resp.actions.forEach((a) => { newSelection[a.id] = true; }); + setSelectedActions(newSelection); + + // Update the last assistant message with new report + setMessages((prev) => { + const updated = [...prev]; + // Find the last assistant message with this report and update it + for (let i = updated.length - 1; i >= 0; i--) { + const msg = updated[i]; + if ('report' in msg && msg.report.path === report.path) { + updated[i] = { ...msg, report: updatedReport }; + break; + } + } + return [ + ...updated, + { role: 'assistant', text: `🔧 **AI сгенерировал ${resp.actions.length} исправлений** (${settings.model}):\n\n${resp.explanation}` }, + ]; + }); + } else if (resp.ok && resp.actions.length === 0) { + setMessages((prev) => [ + ...prev, + { role: 'system', text: '✓ AI не нашёл дополнительных исправлений — проект в хорошем состоянии.' 
}, + ]); + } else { + setMessages((prev) => [ + ...prev, + { role: 'system', text: `❌ Ошибка генерации: ${resp.error}` }, + ]); + } + } catch (e) { + setMessages((prev) => [ + ...prev, + { role: 'system', text: `❌ Ошибка: ${e}` }, + ]); + } + setIsGeneratingActions(false); + }; + useEffect(() => { messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }); }, [messages]); @@ -605,6 +672,8 @@ export function Tasks() { onUndo={handleUndoLast} onAiAnalysis={handleAiAnalysis} isAiAnalyzing={isAiAnalyzing} + onAiCodeGen={handleAiCodeGen} + isGeneratingActions={isGeneratingActions} /> )} @@ -803,6 +872,8 @@ function ReportBlock({ onUndo: (projectPath: string) => void; onAiAnalysis?: (report: AnalyzeReport) => void; isAiAnalyzing?: boolean; + onAiCodeGen?: (report: AnalyzeReport) => void; + isGeneratingActions?: boolean; }) { if (error) { return
@@ -605,6 +672,8 @@ export function Tasks() {
                 onUndo={handleUndoLast}
                 onAiAnalysis={handleAiAnalysis}
                 isAiAnalyzing={isAiAnalyzing}
+                onAiCodeGen={handleAiCodeGen}
+                isGeneratingActions={isGeneratingActions}
               />
             )}
@@ -803,6 +872,8 @@ function ReportBlock({
   onUndo: (projectPath: string) => void;
   onAiAnalysis?: (report: AnalyzeReport) => void;
   isAiAnalyzing?: boolean;
+  onAiCodeGen?: (report: AnalyzeReport) => void;
+  isGeneratingActions?: boolean;
 }) {
   if (error) {
     return <div>Ошибка: {error}</div>;
@@ -948,6 +1019,17 @@ function ReportBlock({
               {isAiAnalyzing ? 'AI анализирует...' : 'AI Анализ'}
             </button>
           )}
+          {isCurrentReport && onAiCodeGen && (
+            <button
+              onClick={() => onAiCodeGen(report)}
+              disabled={isGeneratingActions}
+            >
+              {isGeneratingActions ? 'AI генерирует...' : 'AI Исправления'}
+            </button>
+          )}
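Taken together, the pieces compose into a short pipeline: analyze, ask the model for fixes, merge the proposals back into the report. A compressed end-to-end sketch (assumes `analyzeProject(path)` resolves to an `AnalyzeReport`, as its use in Tasks.tsx suggests; preview/apply steps omitted):

```ts
import { analyzeProject, generateAiActions, type LlmSettings } from './lib/analyze';

async function auditWithAiFixes(path: string, settings: LlmSettings) {
  const report = await analyzeProject(path);             // static findings + actions
  const ai = await generateAiActions(settings, report);  // model-proposed fixes
  if (!ai.ok) throw new Error(ai.error ?? 'AI generation failed');
  // Same merge Tasks.tsx performs: static actions first, AI actions appended.
  return { ...report, actions: [...(report.actions ?? []), ...ai.actions] };
}
```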