From 59476fc72b42410afa63cc5bb3aa4cd6f1b84611 Mon Sep 17 00:00:00 2001
From: Eric Garcia
Date: Sat, 24 Jan 2026 15:22:43 -0500
Subject: [PATCH] test(ollama): Add integration tests for running Ollama

Tests require running Ollama server (run with --ignored):
- integration_health_check: Verify server is healthy
- integration_list_models: List installed models
- integration_generate: Test text generation

Co-Authored-By: Claude Opus 4.5
---
 crates/blue-ollama/src/lib.rs | 62 +++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/crates/blue-ollama/src/lib.rs b/crates/blue-ollama/src/lib.rs
index 5c1efa0..f05342b 100644
--- a/crates/blue-ollama/src/lib.rs
+++ b/crates/blue-ollama/src/lib.rs
@@ -668,4 +668,66 @@ mod tests {
         assert_eq!(cloned.name, info.name);
         assert_eq!(cloned.size, info.size);
     }
+
+    // Integration tests - require running Ollama server
+    // Run with: cargo test -p blue-ollama -- --ignored
+
+    #[test]
+    #[ignore]
+    fn integration_health_check() {
+        let config = LocalLlmConfig {
+            use_external: true,
+            ..Default::default()
+        };
+        let ollama = EmbeddedOllama::new(&config);
+        match ollama.health_check() {
+            HealthStatus::Healthy { version, .. } => {
+                println!("✓ Ollama healthy: v{}", version);
+            }
+            HealthStatus::Unhealthy { error } => {
+                panic!("Ollama unhealthy: {}", error);
+            }
+            HealthStatus::NotRunning => {
+                panic!("Ollama not running - start with 'ollama serve'");
+            }
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn integration_list_models() {
+        let config = LocalLlmConfig {
+            use_external: true,
+            ..Default::default()
+        };
+        let ollama = EmbeddedOllama::new(&config);
+        let models = ollama.list_models().expect("Failed to list models");
+        println!("Found {} models:", models.len());
+        for m in &models {
+            println!("  - {} ({:.1} GB)", m.name, m.size as f64 / 1e9);
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn integration_generate() {
+        let config = LocalLlmConfig {
+            use_external: true,
+            model: "qwen2.5:0.5b".to_string(),
+            ..Default::default()
+        };
+        let ollama = EmbeddedOllama::new(&config);
+
+        let options = CompletionOptions {
+            max_tokens: 10,
+            temperature: 0.1,
+            stop_sequences: vec![],
+        };
+
+        let result = ollama.generate("qwen2.5:0.5b", "2+2=", &options)
+            .expect("Failed to generate");
+
+        println!("Response: {}", result.text);
+        assert!(!result.text.is_empty());
+    }
 }