feat(realm): Implement RFC 0001 cross-repo coordination and RFC 0002 Phase 1 MCP tools

RFC 0001 - Cross-Repo Coordination with Realms:
- Daemon architecture with HTTP server on localhost:7865
- SQLite persistence for sessions, realms, notifications
- Realm service with git-based storage and caching
- CLI commands: realm status/sync/check/worktree/pr/admin
- Session coordination for multi-repo work

RFC 0002 Phase 1 - Realm MCP Integration:
- realm_status: Get realm overview (repos, domains, contracts)
- realm_check: Validate contracts/bindings with errors/warnings
- contract_get: Get contract details with bindings
- Context detection from .blue/config.yaml
- 98% expert panel alignment via 12-expert dialogue

Also includes:
- CLI documentation in docs/cli/
- Spike for Forgejo tunnelless access
- 86 tests passing

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Eric Garcia 2026-01-24 07:14:35 -05:00
parent eef4a8460e
commit daaaea5c82
26 changed files with 6692 additions and 72 deletions

View file

@ -17,6 +17,7 @@ authors = ["Eric Minton Garcia"]
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
# Async runtime
tokio = { version = "1.0", features = ["full", "io-std"] }
@ -44,6 +45,26 @@ git2 = "0.19"
# Regex
regex = "1.10"
# Semver
semver = { version = "1.0", features = ["serde"] }
# HTTP server (daemon)
axum = "0.8"
tower-http = { version = "0.6", features = ["cors", "trace"] }
# HTTP client (Forgejo API)
reqwest = { version = "0.12", features = ["json"] }
# Directories
dirs = "5.0"
# Testing HTTP services
tower = { version = "0.5", features = ["util"] }
http-body-util = "0.1"
# Testing
tempfile = "3.15"
# Internal
blue-core = { path = "crates/blue-core" }
blue-mcp = { path = "crates/blue-mcp" }

View file

@ -17,3 +17,4 @@ anyhow.workspace = true
tokio.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
chrono.workspace = true

View file

@ -4,6 +4,8 @@
use clap::{Parser, Subcommand};
use anyhow::Result;
use blue_core::daemon::{DaemonClient, DaemonDb, DaemonPaths, DaemonState, run_daemon};
use blue_core::realm::RealmService;
#[derive(Parser)]
#[command(name = "blue")]
@ -55,6 +57,219 @@ enum Commands {
/// Run as MCP server
Mcp,
/// Daemon commands
Daemon {
#[command(subcommand)]
command: Option<DaemonCommands>,
},
/// Realm commands (cross-repo coordination)
Realm {
#[command(subcommand)]
command: RealmCommands,
},
/// Session commands (work coordination)
Session {
#[command(subcommand)]
command: SessionCommands,
},
}
// Subcommands under `blue daemon`. With no subcommand given, the CLI defaults
// to starting the daemon in the foreground (see `handle_daemon_command`).
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum DaemonCommands {
    /// Start the daemon (foreground)
    Start,
    /// Check daemon status
    Status,
    /// Stop the daemon
    Stop,
}
// Subcommands under `blue realm` (cross-repo coordination, RFC 0001).
// Dispatched by `handle_realm_command`, which ensures the daemon is running
// before handling any of these.
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum RealmCommands {
    /// Show realm status
    Status,
    /// Sync with realm repository
    Sync {
        /// Force sync even if no changes detected
        #[arg(long)]
        force: bool,
    },
    /// Check realm for CI validation
    Check {
        /// Specific realm to check (default: all)
        #[arg(long)]
        realm: Option<String>,
        /// Exit with error code on warnings
        #[arg(long)]
        strict: bool,
    },
    /// Worktree management for multi-repo RFC work
    Worktree {
        #[command(subcommand)]
        command: RealmWorktreeCommands,
    },
    /// PR workflow for cross-repo changes
    Pr {
        #[command(subcommand)]
        command: RealmPrCommands,
    },
    /// Realm admin commands
    Admin {
        #[command(subcommand)]
        command: RealmAdminCommands,
    },
}
// Subcommands under `blue realm pr`: inspect and prepare cross-repo PRs for a
// given RFC. Both variants key off the RFC name (which is also the worktree
// branch name — see RealmWorktreeCommands::Create).
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum RealmPrCommands {
    /// Show PR status for an RFC across repos
    Status {
        /// RFC name
        #[arg(long)]
        rfc: String,
    },
    /// Prepare changes for PR (commit uncommitted changes)
    Prepare {
        /// RFC name
        #[arg(long)]
        rfc: String,
        /// Commit message
        #[arg(long, short)]
        message: Option<String>,
    },
}
// Subcommands under `blue realm worktree`: one git worktree per repo per RFC,
// managed by RealmService (see handle_worktree_command).
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum RealmWorktreeCommands {
    /// Create worktrees for an RFC across repos
    Create {
        /// RFC name (becomes branch name)
        #[arg(long)]
        rfc: String,
        /// Specific repos (default: all in realm)
        #[arg(long, value_delimiter = ',')]
        repos: Option<Vec<String>>,
    },
    /// List active worktrees
    List,
    /// Remove worktrees for an RFC
    Remove {
        /// RFC name
        #[arg(long)]
        rfc: String,
    },
}
// Subcommands under `blue realm admin`: realm lifecycle and schema management
// (init/join, then domains, contracts, and bindings within a realm).
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum RealmAdminCommands {
    /// Initialize a new realm
    Init {
        /// Realm name
        #[arg(long)]
        name: String,
        /// Forgejo URL (optional, uses local git if not provided)
        #[arg(long)]
        forgejo: Option<String>,
    },
    /// Join an existing realm
    Join {
        /// Realm name
        // Positional on purpose (no #[arg(long)]) — matches the usage hint
        // "blue realm admin join <realm>" printed elsewhere in this file.
        name: String,
        /// Repo name (defaults to current directory name)
        #[arg(long)]
        repo: Option<String>,
    },
    /// Create a domain in a realm
    Domain {
        /// Realm name
        #[arg(long)]
        realm: String,
        /// Domain name
        #[arg(long)]
        name: String,
        /// Member repos (comma-separated)
        #[arg(long, value_delimiter = ',')]
        repos: Vec<String>,
    },
    /// Create a contract in a domain
    Contract {
        /// Realm name
        #[arg(long)]
        realm: String,
        /// Domain name
        #[arg(long)]
        domain: String,
        /// Contract name
        #[arg(long)]
        name: String,
        /// Owner repo (the repo that can modify this contract)
        #[arg(long)]
        owner: String,
    },
    /// Create a binding for a repo in a domain
    Binding {
        /// Realm name
        #[arg(long)]
        realm: String,
        /// Domain name
        #[arg(long)]
        domain: String,
        /// Repo name
        #[arg(long)]
        repo: String,
        /// Role: provider, consumer, or both
        // Parsed case-insensitively in handle_realm_admin_command; invalid
        // values are rejected there with a usage message.
        #[arg(long, default_value = "consumer")]
        role: String,
    },
}
// Subcommands under `blue session`: register/list/stop daemon-tracked work
// sessions. The active session id is persisted in `.blue/session` by
// handle_session_command.
// NOTE: the `///` doc comments double as clap help text — edit with care.
#[derive(Subcommand)]
enum SessionCommands {
    /// Start a work session
    Start {
        /// RFC being worked on
        #[arg(long)]
        rfc: Option<String>,
    },
    /// List active sessions
    List,
    /// Stop current session
    Stop,
    /// Show session status
    Status,
}
#[derive(Subcommand)]
@ -128,6 +343,15 @@ async fn main() -> Result<()> {
Some(Commands::Mcp) => {
blue_mcp::run().await?;
}
Some(Commands::Daemon { command }) => {
handle_daemon_command(command).await?;
}
Some(Commands::Realm { command }) => {
handle_realm_command(command).await?;
}
Some(Commands::Session { command }) => {
handle_session_command(command).await?;
}
Some(Commands::Rfc { command }) => match command {
RfcCommands::Create { title } => {
println!("{}", blue_core::voice::success(
@ -171,3 +395,725 @@ async fn main() -> Result<()> {
Ok(())
}
/// Dispatch a `blue daemon` subcommand; a bare `blue daemon` starts the
/// daemon in the foreground, same as `blue daemon start`.
async fn handle_daemon_command(command: Option<DaemonCommands>) -> Result<()> {
    // No subcommand means "start".
    let action = command.unwrap_or(DaemonCommands::Start);
    match action {
        DaemonCommands::Start => {
            // Foreground run: prepare the on-disk layout, open the database,
            // then hand control to the daemon event loop.
            let paths = DaemonPaths::new()?;
            paths.ensure_dirs()?;
            let db = DaemonDb::open(&paths.database)?;
            let state = DaemonState::new(db, paths);
            println!("Starting Blue daemon on localhost:7865...");
            run_daemon(state).await?;
        }
        DaemonCommands::Status => {
            let client = DaemonClient::new();
            if let Ok(health) = client.health().await {
                println!("Daemon running. Version: {}", health.version);
                // Sessions and realms are best-effort extras; a failed
                // listing is silently skipped.
                let sessions = client.list_sessions().await.unwrap_or_default();
                if !sessions.is_empty() {
                    println!("\nActive sessions:");
                    for session in sessions {
                        println!(" {} ({}) - {}", session.repo, session.realm, session.id);
                    }
                }
                let realms = client.list_realms().await.unwrap_or_default();
                if !realms.is_empty() {
                    println!("\nTracked realms:");
                    for realm in realms {
                        println!(" {} - {}", realm.name, realm.forgejo_url);
                    }
                }
            } else {
                println!("Daemon not running.");
            }
        }
        DaemonCommands::Stop => {
            // TODO: Implement graceful shutdown
            println!("Stopping daemon not yet implemented.");
        }
    }
    Ok(())
}
/// Dispatch a `blue realm` subcommand.
///
/// Ensures the daemon is running first (all realm commands rely on it),
/// handles `status`/`sync`/`check` inline, and delegates
/// worktree/pr/admin to their dedicated handlers.
async fn handle_realm_command(command: RealmCommands) -> Result<()> {
    let client = DaemonClient::new();
    // Ensure daemon is running for all realm commands
    client.ensure_running().await?;
    match command {
        RealmCommands::Status => {
            // Realm details are read from local storage; sessions and
            // notifications come from the daemon, best-effort (errors
            // degrade to empty lists rather than failing the command).
            let paths = DaemonPaths::new()?;
            let service = RealmService::new(paths.realms.clone());
            let realm_names = service.list_realms()?;
            if realm_names.is_empty() {
                println!("No realms configured. Run 'blue realm admin init' to create one.");
                return Ok(());
            }
            let sessions = client.list_sessions().await.unwrap_or_default();
            let notifications = client.list_notifications().await.unwrap_or_default();
            for realm_name in &realm_names {
                // Load detailed realm info
                match service.load_realm_details(realm_name) {
                    Ok(details) => {
                        println!("Realm: {}", details.info.name);
                        println!(" Path: {}", details.info.path.display());
                        println!(" Version: {}", details.info.config.version);
                        // Repos
                        if !details.repos.is_empty() {
                            println!("\n Repos:");
                            for repo in &details.repos {
                                // Repos without a local checkout show as "remote".
                                let path_info = repo.path.as_deref().unwrap_or("remote");
                                println!(" {} ({})", repo.name, path_info);
                            }
                        }
                        // Domains
                        if !details.domains.is_empty() {
                            println!("\n Domains:");
                            for domain_detail in &details.domains {
                                let d = &domain_detail.domain;
                                println!(" {} ({} members)", d.name, d.members.len());
                                // Contracts in domain
                                for contract in &domain_detail.contracts {
                                    println!(
                                        " Contract: {} v{} (owner: {})",
                                        contract.name, contract.version, contract.owner
                                    );
                                }
                                // Bindings in domain
                                for binding in &domain_detail.bindings {
                                    let exports = binding.exports.len();
                                    let imports = binding.imports.len();
                                    println!(
                                        " Binding: {} ({:?}, {} exports, {} imports)",
                                        binding.repo, binding.role, exports, imports
                                    );
                                }
                            }
                        }
                        // Sessions in this realm
                        let realm_sessions: Vec<_> = sessions
                            .iter()
                            .filter(|s| s.realm == *realm_name)
                            .collect();
                        if !realm_sessions.is_empty() {
                            println!("\n Active sessions:");
                            for s in realm_sessions {
                                // A session with no RFC is shown as "idle".
                                let rfc = s.active_rfc.as_deref().unwrap_or("idle");
                                println!(" {} - {}", s.repo, rfc);
                            }
                        }
                        // Notifications in this realm
                        let realm_notifs: Vec<_> = notifications
                            .iter()
                            .filter(|n| n.realm == *realm_name)
                            .collect();
                        if !realm_notifs.is_empty() {
                            println!("\n Notifications:");
                            for n in realm_notifs {
                                println!(
                                    " [{:?}] {} updated {} in {}",
                                    n.change_type, n.from_repo, n.contract, n.domain
                                );
                            }
                        }
                    }
                    Err(e) => {
                        // One broken realm doesn't abort the listing.
                        println!("Realm: {} (error: {})", realm_name, e);
                    }
                }
                println!();
            }
        }
        RealmCommands::Sync { force } => {
            let paths = DaemonPaths::new()?;
            let service = RealmService::new(paths.realms.clone());
            let realm_names = service.list_realms()?;
            if realm_names.is_empty() {
                println!("No realms configured.");
                return Ok(());
            }
            for realm_name in &realm_names {
                // First show status
                match service.realm_sync_status(realm_name) {
                    Ok(status) if status.has_changes() => {
                        println!("Realm '{}' has pending changes:", realm_name);
                        for f in &status.new_files {
                            println!(" + {}", f);
                        }
                        for f in &status.modified_files {
                            println!(" ~ {}", f);
                        }
                        for f in &status.deleted_files {
                            println!(" - {}", f);
                        }
                    }
                    Ok(_) => {
                        println!("Realm '{}' is clean.", realm_name);
                    }
                    Err(e) => {
                        // Skip syncing a realm whose status can't be read.
                        println!("Realm '{}': error getting status: {}", realm_name, e);
                        continue;
                    }
                }
                // Sync
                println!("Syncing realm '{}'...", realm_name);
                match service.sync_realm(realm_name, force) {
                    Ok(result) => {
                        println!(" {}", result.message);
                        if let Some(commit) = result.last_commit {
                            println!(" Latest: {}", commit);
                        }
                    }
                    Err(e) => {
                        println!(" Error: {}", e);
                    }
                }
            }
        }
        RealmCommands::Check { realm, strict } => {
            let paths = DaemonPaths::new()?;
            let service = RealmService::new(paths.realms.clone());
            // --realm narrows the check to one realm; default is all.
            let realm_names = match realm {
                Some(name) => vec![name],
                None => service.list_realms()?,
            };
            if realm_names.is_empty() {
                println!("No realms configured.");
                return Ok(());
            }
            let mut has_errors = false;
            let mut has_warnings = false;
            for realm_name in &realm_names {
                println!("Checking realm '{}'...", realm_name);
                match service.check_realm(realm_name) {
                    Ok(result) => {
                        if result.is_ok() && !result.has_warnings() {
                            println!(" All checks passed.");
                        }
                        for warning in &result.warnings {
                            has_warnings = true;
                            println!(" WARNING [{}]: {}", warning.domain, warning.message);
                        }
                        for error in &result.errors {
                            has_errors = true;
                            println!(" ERROR [{}]: {}", error.domain, error.message);
                        }
                    }
                    Err(e) => {
                        has_errors = true;
                        println!(" Error checking realm: {}", e);
                    }
                }
            }
            // CI contract: exit 1 on any error, and on warnings under --strict.
            // NOTE(review): std::process::exit skips destructors; acceptable
            // here since only stdout has been touched — confirm if this
            // handler ever acquires resources needing cleanup.
            if has_errors {
                std::process::exit(1);
            } else if strict && has_warnings {
                std::process::exit(1);
            }
        }
        RealmCommands::Worktree { command } => {
            handle_worktree_command(command).await?;
        }
        RealmCommands::Pr { command } => {
            handle_realm_pr_command(command).await?;
        }
        RealmCommands::Admin { command } => {
            handle_realm_admin_command(command, &client).await?;
        }
    }
    Ok(())
}
/// Dispatch `blue realm worktree` subcommands (create/list/remove).
///
/// Worktrees give each RFC a branch-per-repo checkout; creation/removal is
/// delegated to `RealmService`, this handler only resolves the target realm
/// and prints results.
async fn handle_worktree_command(command: RealmWorktreeCommands) -> Result<()> {
    use blue_core::realm::LocalRepoConfig;
    let paths = DaemonPaths::new()?;
    let service = RealmService::new(paths.realms.clone());
    match command {
        RealmWorktreeCommands::Create { rfc, repos } => {
            // Get current directory and check for .blue/config.yaml
            let cwd = std::env::current_dir()?;
            let config_path = cwd.join(".blue").join("config.yaml");
            if !config_path.exists() {
                println!("This repo is not part of a realm.");
                println!("Run 'blue realm admin join <realm>' first.");
                return Ok(());
            }
            let local_config = LocalRepoConfig::load(&config_path)?;
            let realm_name = &local_config.realm.name;
            // Get repos to create worktrees for
            let details = service.load_realm_details(realm_name)?;
            // --repos filters the set; default is every repo in the realm.
            let repo_names: Vec<String> = match repos {
                Some(r) => r,
                None => details.repos.iter().map(|r| r.name.clone()).collect(),
            };
            if repo_names.is_empty() {
                println!("No repos found in realm '{}'.", realm_name);
                return Ok(());
            }
            println!("Creating worktrees for RFC '{}' in realm '{}'...", rfc, realm_name);
            for repo in &details.repos {
                if !repo_names.contains(&repo.name) {
                    continue;
                }
                // Remote-only repos (no local path) cannot host a worktree.
                let repo_path = match &repo.path {
                    Some(p) => std::path::PathBuf::from(p),
                    None => {
                        println!(" {} - skipped (no local path)", repo.name);
                        continue;
                    }
                };
                match service.create_worktree(realm_name, &repo.name, &rfc, &repo_path) {
                    Ok(info) => {
                        // Creation is idempotent: an existing worktree is reported,
                        // not treated as an error.
                        if info.already_existed {
                            println!(" {} - already exists at {}", info.repo, info.path.display());
                        } else {
                            println!(" {} - created at {}", info.repo, info.path.display());
                        }
                    }
                    Err(e) => {
                        println!(" {} - error: {}", repo.name, e);
                    }
                }
            }
        }
        RealmWorktreeCommands::List => {
            let realm_names = service.list_realms()?;
            if realm_names.is_empty() {
                println!("No realms configured.");
                return Ok(());
            }
            let mut found_any = false;
            for realm_name in &realm_names {
                let worktrees = service.list_worktrees(realm_name)?;
                if !worktrees.is_empty() {
                    found_any = true;
                    println!("Realm '{}' worktrees:", realm_name);
                    for wt in worktrees {
                        println!(" {} ({}) - {}", wt.rfc, wt.repo, wt.path.display());
                    }
                }
            }
            if !found_any {
                println!("No active worktrees.");
            }
        }
        RealmWorktreeCommands::Remove { rfc } => {
            // Get current realm from config
            let cwd = std::env::current_dir()?;
            let config_path = cwd.join(".blue").join("config.yaml");
            let realm_name = if config_path.exists() {
                let local_config = LocalRepoConfig::load(&config_path)?;
                local_config.realm.name
            } else {
                // Try to find any realm with this RFC worktree
                // (fallback lets removal run from outside a joined repo).
                let realm_names = service.list_realms()?;
                let mut found_realm = None;
                for name in &realm_names {
                    let worktrees = service.list_worktrees(name)?;
                    if worktrees.iter().any(|wt| wt.rfc == rfc) {
                        found_realm = Some(name.clone());
                        break;
                    }
                }
                match found_realm {
                    Some(name) => name,
                    None => {
                        println!("No worktrees found for RFC '{}'.", rfc);
                        return Ok(());
                    }
                }
            };
            println!("Removing worktrees for RFC '{}' in realm '{}'...", rfc, realm_name);
            match service.remove_worktrees(&realm_name, &rfc) {
                Ok(removed) => {
                    if removed.is_empty() {
                        println!(" No worktrees found.");
                    } else {
                        for repo in removed {
                            println!(" {} - removed", repo);
                        }
                    }
                }
                Err(e) => {
                    println!(" Error: {}", e);
                }
            }
        }
    }
    Ok(())
}
/// Dispatch `blue realm pr` subcommands (status/prepare) for an RFC's
/// cross-repo worktrees.
async fn handle_realm_pr_command(command: RealmPrCommands) -> Result<()> {
    use blue_core::realm::LocalRepoConfig;
    let paths = DaemonPaths::new()?;
    let service = RealmService::new(paths.realms.clone());
    // Get realm from current directory config or find from worktrees.
    // The worktree fallback lets these commands run from outside a joined
    // repo as long as the RFC has worktrees somewhere.
    let get_realm_name = |rfc: &str| -> Result<String> {
        let cwd = std::env::current_dir()?;
        let config_path = cwd.join(".blue").join("config.yaml");
        if config_path.exists() {
            let local_config = LocalRepoConfig::load(&config_path)?;
            return Ok(local_config.realm.name);
        }
        // Try to find realm from worktrees
        let realm_names = service.list_realms()?;
        for name in &realm_names {
            let worktrees = service.list_worktrees(name)?;
            if worktrees.iter().any(|wt| wt.rfc == rfc) {
                return Ok(name.clone());
            }
        }
        anyhow::bail!("No realm found for RFC '{}'", rfc);
    };
    match command {
        RealmPrCommands::Status { rfc } => {
            let realm_name = get_realm_name(&rfc)?;
            let statuses = service.pr_status(&realm_name, &rfc)?;
            if statuses.is_empty() {
                println!("No worktrees found for RFC '{}' in realm '{}'.", rfc, realm_name);
                println!("Run 'blue realm worktree create --rfc {}' first.", rfc);
                return Ok(());
            }
            println!("PR status for RFC '{}' in realm '{}':\n", rfc, realm_name);
            for status in &statuses {
                // "!" marks repos with uncommitted changes.
                let icon = if status.has_uncommitted { "!" } else { "" };
                println!(
                    "{} {} (branch: {}, {} commits ahead)",
                    icon, status.repo, status.branch, status.commits_ahead
                );
                println!(" Path: {}", status.path.display());
                if status.has_uncommitted {
                    println!(" Uncommitted changes:");
                    for file in &status.modified_files {
                        println!(" - {}", file);
                    }
                }
            }
            // Summary
            let uncommitted_count = statuses.iter().filter(|s| s.has_uncommitted).count();
            let total_commits: usize = statuses.iter().map(|s| s.commits_ahead).sum();
            println!("\nSummary:");
            println!(" {} repos with worktrees", statuses.len());
            println!(" {} repos with uncommitted changes", uncommitted_count);
            println!(" {} total commits ahead of main", total_commits);
            if uncommitted_count > 0 {
                println!("\nRun 'blue realm pr prepare --rfc {}' to commit changes.", rfc);
            }
        }
        RealmPrCommands::Prepare { rfc, message } => {
            let realm_name = get_realm_name(&rfc)?;
            let msg = message.as_deref();
            println!("Preparing PR for RFC '{}' in realm '{}'...\n", rfc, realm_name);
            // pr_prepare returns (repo, committed?) pairs, one per worktree.
            let results = service.pr_prepare(&realm_name, &rfc, msg)?;
            if results.is_empty() {
                println!("No worktrees found for RFC '{}'.", rfc);
                return Ok(());
            }
            for (repo, committed) in &results {
                if *committed {
                    println!(" {} - changes committed", repo);
                } else {
                    println!(" {} - no changes to commit", repo);
                }
            }
            let committed_count = results.iter().filter(|(_, c)| *c).count();
            println!("\n{} repos had changes committed.", committed_count);
        }
    }
    Ok(())
}
/// Dispatch `blue realm admin` subcommands: realm lifecycle (init/join) and
/// schema management (domain/contract/binding).
///
/// `_client` is currently unused: admin writes go straight to local storage
/// and the daemon database rather than through the daemon API (see the note
/// in the `Init` arm).
async fn handle_realm_admin_command(command: RealmAdminCommands, _client: &DaemonClient) -> Result<()> {
    let paths = DaemonPaths::new()?;
    paths.ensure_dirs()?;
    let service = RealmService::new(paths.realms.clone());
    match command {
        RealmAdminCommands::Init { name, forgejo } => {
            // Create realm locally, then register it with the daemon.
            let info = service.init_realm(&name)?;
            let realm = service.to_daemon_realm(&info);
            // For now, directly update the daemon's database.
            // In the future, this would go through the daemon API.
            // NOTE(review): if the daemon is running it may hold this SQLite
            // file open concurrently — confirm the connection settings
            // (locking/busy handling) tolerate a second writer.
            let db = DaemonDb::open(&paths.database)?;
            db.upsert_realm(&realm)?;
            println!("Created realm '{}'", name);
            println!(" Path: {}", info.path.display());
            if let Some(url) = forgejo {
                // Forgejo push is deferred until the remote is reachable.
                println!(" Forgejo: {} (push deferred - remote down)", url);
            } else {
                println!(" Mode: local git");
            }
            println!("\nNext: Run 'blue realm admin join {}' in your repos.", name);
        }
        RealmAdminCommands::Join { name, repo } => {
            let cwd = std::env::current_dir()?;
            // Repo name defaults to the current directory's basename.
            let repo_name = repo.unwrap_or_else(|| {
                cwd.file_name()
                    .and_then(|n| n.to_str())
                    .unwrap_or("unknown")
                    .to_string()
            });
            // Join the realm (writes .blue/config.yaml in cwd).
            service.join_realm(&name, &repo_name, &cwd)?;
            println!("Joined realm '{}'", name);
            println!(" Repo: {}", repo_name);
            println!(" Config: {}/.blue/config.yaml", cwd.display());
        }
        RealmAdminCommands::Domain { realm, name, repos } => {
            service.create_domain(&realm, &name, &repos)?;
            println!("Created domain '{}' in realm '{}'", name, realm);
            println!(" Members: {}", repos.join(", "));
            println!("\nNext: Create contracts and bindings for this domain.");
        }
        RealmAdminCommands::Contract { realm, domain, name, owner } => {
            service.create_contract(&realm, &domain, &name, &owner)?;
            println!("Created contract '{}' in domain '{}'", name, domain);
            println!(" Owner: {}", owner);
            println!(" Version: 1.0.0");
            println!("\nNext: Create bindings to export/import this contract.");
        }
        RealmAdminCommands::Binding { realm, domain, repo, role } => {
            use blue_core::realm::BindingRole;
            // Roles are matched case-insensitively; anything else is rejected
            // with a usage message instead of an error.
            let binding_role = match role.to_lowercase().as_str() {
                "provider" => BindingRole::Provider,
                "consumer" => BindingRole::Consumer,
                "both" => BindingRole::Both,
                _ => {
                    println!("Invalid role '{}'. Use: provider, consumer, or both.", role);
                    return Ok(());
                }
            };
            // Render the role label before handing `binding_role` to the
            // service: create_binding takes it by value, so printing it
            // afterwards would require `BindingRole: Copy`. Output is
            // identical ({:?} formatting either way).
            let role_label = format!("{:?}", binding_role);
            service.create_binding(&realm, &domain, &repo, binding_role)?;
            println!("Created binding for '{}' in domain '{}'", repo, domain);
            println!(" Role: {}", role_label);
            println!("\nNext: Run 'blue realm check' to validate the configuration.");
        }
    }
    Ok(())
}
/// Dispatch `blue session` subcommands (start/list/stop/status).
///
/// The active session id is persisted in `.blue/session` in the repo root so
/// later `stop`/`status` invocations can find the daemon-side session.
async fn handle_session_command(command: SessionCommands) -> Result<()> {
    use blue_core::daemon::CreateSessionRequest;
    use blue_core::realm::LocalRepoConfig;
    let client = DaemonClient::new();
    client.ensure_running().await?;
    match command {
        SessionCommands::Start { rfc } => {
            // Get current directory and check for .blue/config.yaml
            let cwd = std::env::current_dir()?;
            let config_path = cwd.join(".blue").join("config.yaml");
            if !config_path.exists() {
                println!("This repo is not part of a realm.");
                println!("Run 'blue realm admin join <realm>' first.");
                return Ok(());
            }
            // Load local config to get realm and repo info
            let local_config = LocalRepoConfig::load(&config_path)?;
            // Generate session ID: "<repo>-<pid>-<unix-timestamp>".
            let session_id = format!(
                "{}-{}-{}",
                local_config.repo,
                std::process::id(),
                chrono::Utc::now().timestamp()
            );
            // Create session (domain/export/import tracking starts empty).
            let req = CreateSessionRequest {
                id: session_id.clone(),
                repo: local_config.repo.clone(),
                realm: local_config.realm.name.clone(),
                client_id: Some(format!("cli-{}", std::process::id())),
                active_rfc: rfc.clone(),
                active_domains: Vec::new(),
                exports_modified: Vec::new(),
                imports_watching: Vec::new(),
            };
            let session = client.create_session(req).await?;
            println!("Session started: {}", session.id);
            println!(" Repo: {}", session.repo);
            println!(" Realm: {}", session.realm);
            if let Some(rfc) = &session.active_rfc {
                println!(" RFC: {}", rfc);
            }
            // Save session ID to .blue/session
            let session_file = cwd.join(".blue").join("session");
            std::fs::write(&session_file, &session.id)?;
            println!("\nSession ID saved to .blue/session");
        }
        SessionCommands::List => {
            let sessions = client.list_sessions().await?;
            if sessions.is_empty() {
                println!("No active sessions.");
            } else {
                println!("Active sessions:");
                for s in sessions {
                    // Sessions with no RFC display as "idle".
                    let rfc = s.active_rfc.as_deref().unwrap_or("idle");
                    println!(
                        " {} ({}/{}) - {}",
                        s.id, s.realm, s.repo, rfc
                    );
                }
            }
        }
        SessionCommands::Stop => {
            // Try to read session ID from .blue/session
            let cwd = std::env::current_dir()?;
            let session_file = cwd.join(".blue").join("session");
            if !session_file.exists() {
                println!("No active session in this repo.");
                return Ok(());
            }
            let session_id = std::fs::read_to_string(&session_file)?;
            let session_id = session_id.trim();
            // Deregister daemon-side first; the local marker is removed only
            // after the daemon call succeeds.
            client.remove_session(session_id).await?;
            std::fs::remove_file(&session_file)?;
            println!("Session stopped: {}", session_id);
        }
        SessionCommands::Status => {
            // Check for local session
            let cwd = std::env::current_dir()?;
            let session_file = cwd.join(".blue").join("session");
            if session_file.exists() {
                let session_id = std::fs::read_to_string(&session_file)?;
                let session_id = session_id.trim();
                println!("Current session: {}", session_id);
            } else {
                println!("No active session in this repo.");
            }
            // List all sessions
            let sessions = client.list_sessions().await?;
            if !sessions.is_empty() {
                println!("\nAll active sessions:");
                for s in sessions {
                    let rfc = s.active_rfc.as_deref().unwrap_or("idle");
                    println!(" {} ({}/{}) - {}", s.id, s.realm, s.repo, rfc);
                }
            }
            // Check for notifications
            let notifications = client.list_notifications().await?;
            if !notifications.is_empty() {
                println!("\nPending notifications:");
                for n in notifications {
                    println!(
                        " [{:?}] {} updated {} in {}",
                        n.change_type, n.from_repo, n.contract, n.domain
                    );
                }
            }
        }
    }
    Ok(())
}

View file

@ -11,6 +11,7 @@ test-helpers = []
[dependencies]
serde.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
thiserror.workspace = true
anyhow.workspace = true
tokio.workspace = true
@ -18,3 +19,13 @@ tracing.workspace = true
rusqlite.workspace = true
chrono.workspace = true
git2.workspace = true
axum.workspace = true
tower-http.workspace = true
reqwest.workspace = true
dirs.workspace = true
semver.workspace = true
[dev-dependencies]
tower.workspace = true
http-body-util.workspace = true
tempfile.workspace = true

View file

@ -0,0 +1,268 @@
//! Daemon client for CLI and GUI
//!
//! Provides a typed interface to the daemon HTTP API with auto-start support.
use reqwest::Client;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::process::{Command, Stdio};
use std::time::Duration;
use thiserror::Error;
use tokio::time::sleep;
use tracing::{debug, info};
use super::db::{Notification, Realm, Session};
use super::DAEMON_PORT;
/// Errors returned by [`DaemonClient`] operations.
// NOTE: the #[error(...)] strings are the Display output — part of the
// public behavior, do not reword casually.
#[derive(Debug, Error)]
pub enum ClientError {
    /// Auto-start of the daemon process failed (spawn or exe lookup).
    #[error("Daemon not running and failed to start: {0}")]
    DaemonStartFailed(String),
    /// Daemon was spawned but never answered /health within the retry budget.
    #[error("Daemon not reachable after {0} attempts")]
    DaemonUnreachable(u32),
    /// Transport-level failure from reqwest (connection, timeout, decode).
    #[error("HTTP error: {0}")]
    Http(#[from] reqwest::Error),
    /// Daemon answered with a non-success status and an error payload.
    #[error("API error: {status} - {message}")]
    Api { status: u16, message: String },
}
// Error payload the daemon returns on non-2xx responses.
#[derive(Debug, Deserialize)]
struct ApiError {
    error: String,
}
// ─── Request/Response Types ─────────────────────────────────────────────────
/// Response shape of `GET /health`.
#[derive(Debug, Deserialize)]
pub struct HealthResponse {
    pub status: String,
    pub version: String,
}
/// Body of `POST /realms/{name}/sync`.
#[derive(Debug, Serialize)]
struct SyncRequest {
    force: bool,
}
/// Response shape of `POST /realms/{name}/sync`.
#[derive(Debug, Deserialize)]
pub struct SyncResponse {
    pub status: String,
    pub message: String,
}
/// Body of `POST /sessions` — registers a work session with the daemon.
// NOTE(review): `#[serde(default, ...)]` below — `default` only affects
// Deserialize, which this struct does not derive; harmless but dead.
#[derive(Debug, Serialize)]
pub struct CreateSessionRequest {
    pub id: String,
    pub repo: String,
    pub realm: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub active_rfc: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub active_domains: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub exports_modified: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub imports_watching: Vec<String>,
}
/// Body of `POST /notifications/{id}/ack` — which repo is acknowledging.
#[derive(Debug, Serialize)]
struct AckRequest {
    repo: String,
}
// ─── Client ─────────────────────────────────────────────────────────────────
/// Client for communicating with the Blue daemon
///
/// Cheap to clone: wraps a reqwest [`Client`] (internally ref-counted) and
/// the daemon base URL (`http://127.0.0.1:<DAEMON_PORT>`).
#[derive(Clone)]
pub struct DaemonClient {
    client: Client,
    base_url: String,
}
impl DaemonClient {
    /// Create a new daemon client targeting the local daemon port with a
    /// 30-second request timeout.
    pub fn new() -> Self {
        Self {
            client: Client::builder()
                .timeout(Duration::from_secs(30))
                .build()
                .expect("Failed to create HTTP client"),
            base_url: format!("http://127.0.0.1:{}", DAEMON_PORT),
        }
    }
    /// Ensure daemon is running, starting it if necessary.
    ///
    /// Polls `/health` up to 10 times at 200 ms intervals after spawning,
    /// so worst-case latency is about 2 seconds before giving up.
    pub async fn ensure_running(&self) -> Result<(), ClientError> {
        // Check if daemon is already running
        if self.health().await.is_ok() {
            debug!("Daemon already running");
            return Ok(());
        }
        info!("Daemon not running, starting...");
        self.start_daemon()?;
        // Wait for daemon to become available
        let max_attempts = 10;
        for attempt in 1..=max_attempts {
            sleep(Duration::from_millis(200)).await;
            if self.health().await.is_ok() {
                info!("Daemon started successfully");
                return Ok(());
            }
            debug!("Waiting for daemon... attempt {}/{}", attempt, max_attempts);
        }
        Err(ClientError::DaemonUnreachable(max_attempts))
    }
    /// Start the daemon as a detached background process.
    ///
    /// Re-invokes the current executable with `daemon start` and all stdio
    /// detached. NOTE(review): assumes the current binary handles that
    /// subcommand (true for the `blue` CLI) — confirm for other embedders.
    fn start_daemon(&self) -> Result<(), ClientError> {
        let exe = std::env::current_exe()
            .map_err(|e| ClientError::DaemonStartFailed(e.to_string()))?;
        let child = Command::new(&exe)
            .arg("daemon")
            .arg("start")
            .stdin(Stdio::null())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()
            .map_err(|e| ClientError::DaemonStartFailed(e.to_string()))?;
        debug!("Spawned daemon process with PID {}", child.id());
        Ok(())
    }
    // ─── Health ─────────────────────────────────────────────────────────────
    /// Check daemon health (`GET /health`).
    pub async fn health(&self) -> Result<HealthResponse, ClientError> {
        self.get("/health").await
    }
    // ─── Realms ─────────────────────────────────────────────────────────────
    /// List all tracked realms (`GET /realms`).
    pub async fn list_realms(&self) -> Result<Vec<Realm>, ClientError> {
        self.get("/realms").await
    }
    /// Get a specific realm (`GET /realms/{name}`).
    pub async fn get_realm(&self, name: &str) -> Result<Realm, ClientError> {
        self.get(&format!("/realms/{}", name)).await
    }
    /// Trigger a sync for a realm (`POST /realms/{name}/sync`).
    pub async fn sync_realm(&self, name: &str, force: bool) -> Result<SyncResponse, ClientError> {
        self.post(&format!("/realms/{}/sync", name), &SyncRequest { force })
            .await
    }
    // ─── Sessions ───────────────────────────────────────────────────────────
    /// List all active sessions (`GET /sessions`).
    pub async fn list_sessions(&self) -> Result<Vec<Session>, ClientError> {
        self.get("/sessions").await
    }
    /// Register a new session (`POST /sessions`).
    pub async fn create_session(&self, req: CreateSessionRequest) -> Result<Session, ClientError> {
        self.post("/sessions", &req).await
    }
    /// Deregister a session (`DELETE /sessions/{id}`).
    pub async fn remove_session(&self, id: &str) -> Result<(), ClientError> {
        self.delete(&format!("/sessions/{}", id)).await
    }
    // ─── Notifications ──────────────────────────────────────────────────────
    /// List pending notifications (`GET /notifications`).
    pub async fn list_notifications(&self) -> Result<Vec<Notification>, ClientError> {
        self.get("/notifications").await
    }
    /// Acknowledge a notification (`POST /notifications/{id}/ack`).
    ///
    /// NOTE(review): deserializing the response into `()` requires the
    /// daemon to return JSON `null` on success — confirm the ack endpoint's
    /// response body.
    pub async fn acknowledge_notification(&self, id: i64, repo: &str) -> Result<(), ClientError> {
        self.post(
            &format!("/notifications/{}/ack", id),
            &AckRequest {
                repo: repo.to_string(),
            },
        )
        .await
    }
    // ─── HTTP Helpers ───────────────────────────────────────────────────────
    /// Convert a non-success response into [`ClientError::Api`], falling back
    /// to a generic message when the body is not a valid `ApiError` payload.
    /// Shared by `get`/`post`/`delete` so the fallback behavior stays in sync.
    async fn api_error(response: reqwest::Response) -> ClientError {
        let status = response.status().as_u16();
        let error: ApiError = response.json().await.unwrap_or(ApiError {
            error: "Unknown error".to_string(),
        });
        ClientError::Api {
            status,
            message: error.error,
        }
    }
    /// `GET {base_url}{path}` and decode a JSON body.
    async fn get<T: DeserializeOwned>(&self, path: &str) -> Result<T, ClientError> {
        let url = format!("{}{}", self.base_url, path);
        let response = self.client.get(&url).send().await?;
        if response.status().is_success() {
            Ok(response.json().await?)
        } else {
            Err(Self::api_error(response).await)
        }
    }
    /// `POST {base_url}{path}` with a JSON body; decode a JSON response.
    async fn post<Req: Serialize, Res: DeserializeOwned>(
        &self,
        path: &str,
        body: &Req,
    ) -> Result<Res, ClientError> {
        let url = format!("{}{}", self.base_url, path);
        let response = self.client.post(&url).json(body).send().await?;
        if response.status().is_success() {
            Ok(response.json().await?)
        } else {
            Err(Self::api_error(response).await)
        }
    }
    /// `DELETE {base_url}{path}`; the response body is ignored on success.
    async fn delete(&self, path: &str) -> Result<(), ClientError> {
        let url = format!("{}{}", self.base_url, path);
        let response = self.client.delete(&url).send().await?;
        if response.status().is_success() {
            Ok(())
        } else {
            Err(Self::api_error(response).await)
        }
    }
}
impl Default for DaemonClient {
fn default() -> Self {
Self::new()
}
}

View file

@ -0,0 +1,576 @@
//! Daemon SQLite database
//!
//! Stores realm state, sessions, and notifications in ~/.blue/daemon.db
use chrono::{DateTime, Utc};
use rusqlite::{params, Connection, OptionalExtension};
use serde::{Deserialize, Serialize};
use std::path::Path;
use thiserror::Error;
/// Errors produced by [`DaemonDb`] operations.
#[derive(Debug, Error)]
pub enum DaemonDbError {
    /// Underlying SQLite failure, converted automatically via `#[from]`.
    #[error("Database error: {0}")]
    Sqlite(#[from] rusqlite::Error),
    /// Lookup by realm name found no row.
    #[error("Realm not found: {0}")]
    RealmNotFound(String),
    /// Lookup by session id found no row.
    #[error("Session not found: {0}")]
    SessionNotFound(String),
}
/// A realm tracked by the daemon
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Realm {
    // Unique realm name (primary key semantics in the daemon DB — TODO confirm).
    pub name: String,
    pub forgejo_url: String,
    // Filesystem path of the local realm storage, stored as a string.
    pub local_path: String,
    // None until the first successful sync.
    pub last_sync: Option<DateTime<Utc>>,
    pub status: RealmStatus,
}
/// Lifecycle state of a tracked realm, persisted as a lowercase string
/// (see `as_str`/`from_str` below).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RealmStatus {
    Active,
    Syncing,
    Error,
}
impl RealmStatus {
fn as_str(&self) -> &'static str {
match self {
Self::Active => "active",
Self::Syncing => "syncing",
Self::Error => "error",
}
}
fn from_str(s: &str) -> Self {
match s {
"syncing" => Self::Syncing,
"error" => Self::Error,
_ => Self::Active,
}
}
}
/// An active session (CLI or GUI instance)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Unique session id (supplied by the client on registration).
    pub id: String,
    /// Repo the session is working in.
    pub repo: String,
    /// Realm that repo belongs to.
    pub realm: String,
    /// Optional identifier of the client program.
    pub client_id: Option<String>,
    /// When the session was registered.
    pub started_at: DateTime<Utc>,
    /// Last activity time (updated by `DaemonDb::touch_session`).
    pub last_activity: DateTime<Utc>,
    /// RFC the session is currently working on, if any.
    pub active_rfc: Option<String>,
    /// Domains the session is touching (persisted as a JSON array string).
    pub active_domains: Vec<String>,
    /// Exports the session has modified (persisted as a JSON array string).
    pub exports_modified: Vec<String>,
    /// Imports the session watches (persisted as a JSON array string).
    pub imports_watching: Vec<String>,
}
/// A notification for cross-repo coordination
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Notification {
    /// SQLite AUTOINCREMENT rowid.
    pub id: i64,
    /// Realm the change happened in.
    pub realm: String,
    /// Domain containing the contract.
    pub domain: String,
    /// Contract that changed.
    pub contract: String,
    /// Repo that made the change.
    pub from_repo: String,
    /// Kind of change (persisted as lowercase text).
    pub change_type: ChangeType,
    /// Optional free-form JSON describing the change.
    pub changes: Option<serde_json::Value>,
    /// Creation time (RFC 3339 text in the DB).
    pub created_at: DateTime<Utc>,
    /// Repos that have acknowledged (persisted as a JSON array string).
    pub acknowledged_by: Vec<String>,
}
/// Kind of contract change, stored as lowercase text in `change_type`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ChangeType {
    /// Contract was updated (also the fallback for unknown column values).
    Updated,
    /// Breaking change.
    Breaking,
    /// Newly created contract.
    New,
}
impl ChangeType {
fn as_str(&self) -> &'static str {
match self {
Self::Updated => "updated",
Self::Breaking => "breaking",
Self::New => "new",
}
}
fn from_str(s: &str) -> Self {
match s {
"breaking" => Self::Breaking,
"new" => Self::New,
_ => Self::Updated,
}
}
}
/// Daemon database handle
///
/// Note: rusqlite::Connection is not Sync, so this must be wrapped
/// in a std::sync::Mutex (not tokio::sync::RwLock) for async contexts.
pub struct DaemonDb {
    // Owned SQLite connection; every query goes through this handle.
    conn: Connection,
}
// Safety: We ensure exclusive access via external synchronization (Mutex)
// NOTE(review): `rusqlite::Connection` is `Send` in current rusqlite
// releases, which would make this manual unsafe impl redundant — confirm
// against the pinned rusqlite version and remove it if so.
unsafe impl Send for DaemonDb {}
impl DaemonDb {
/// Open or create the daemon database
pub fn open(path: &Path) -> Result<Self, DaemonDbError> {
let conn = Connection::open(path)?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
/// Open an in-memory database (for testing)
#[cfg(test)]
pub fn open_memory() -> Result<Self, DaemonDbError> {
let conn = Connection::open_in_memory()?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
/// Initialize the database schema
fn init_schema(&self) -> Result<(), DaemonDbError> {
self.conn.execute_batch(
r#"
CREATE TABLE IF NOT EXISTS realms (
name TEXT PRIMARY KEY,
forgejo_url TEXT NOT NULL,
local_path TEXT NOT NULL,
last_sync TEXT,
status TEXT DEFAULT 'active'
);
CREATE TABLE IF NOT EXISTS sessions (
id TEXT PRIMARY KEY,
repo TEXT NOT NULL,
realm TEXT NOT NULL,
client_id TEXT,
started_at TEXT NOT NULL,
last_activity TEXT NOT NULL,
active_rfc TEXT,
active_domains TEXT DEFAULT '[]',
exports_modified TEXT DEFAULT '[]',
imports_watching TEXT DEFAULT '[]'
);
CREATE TABLE IF NOT EXISTS notifications (
id INTEGER PRIMARY KEY AUTOINCREMENT,
realm TEXT NOT NULL,
domain TEXT NOT NULL,
contract TEXT NOT NULL,
from_repo TEXT NOT NULL,
change_type TEXT NOT NULL,
changes TEXT,
created_at TEXT NOT NULL,
acknowledged_by TEXT DEFAULT '[]'
);
CREATE INDEX IF NOT EXISTS idx_sessions_realm ON sessions(realm);
CREATE INDEX IF NOT EXISTS idx_notifications_realm ON notifications(realm);
CREATE INDEX IF NOT EXISTS idx_notifications_created ON notifications(created_at);
"#,
)?;
Ok(())
}
// ─── Realm Operations ───────────────────────────────────────────────────
/// List all tracked realms
pub fn list_realms(&self) -> Result<Vec<Realm>, DaemonDbError> {
let mut stmt = self.conn.prepare(
"SELECT name, forgejo_url, local_path, last_sync, status FROM realms",
)?;
let realms = stmt
.query_map([], |row| {
Ok(Realm {
name: row.get(0)?,
forgejo_url: row.get(1)?,
local_path: row.get(2)?,
last_sync: row
.get::<_, Option<String>>(3)?
.and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc)),
status: RealmStatus::from_str(&row.get::<_, String>(4)?),
})
})?
.collect::<Result<Vec<_>, _>>()?;
Ok(realms)
}
/// Get a specific realm
pub fn get_realm(&self, name: &str) -> Result<Option<Realm>, DaemonDbError> {
let mut stmt = self.conn.prepare(
"SELECT name, forgejo_url, local_path, last_sync, status FROM realms WHERE name = ?",
)?;
let realm = stmt
.query_row([name], |row| {
Ok(Realm {
name: row.get(0)?,
forgejo_url: row.get(1)?,
local_path: row.get(2)?,
last_sync: row
.get::<_, Option<String>>(3)?
.and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc)),
status: RealmStatus::from_str(&row.get::<_, String>(4)?),
})
})
.optional()?;
Ok(realm)
}
/// Add or update a realm
pub fn upsert_realm(&self, realm: &Realm) -> Result<(), DaemonDbError> {
self.conn.execute(
r#"
INSERT INTO realms (name, forgejo_url, local_path, last_sync, status)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT(name) DO UPDATE SET
forgejo_url = excluded.forgejo_url,
local_path = excluded.local_path,
last_sync = excluded.last_sync,
status = excluded.status
"#,
params![
&realm.name,
&realm.forgejo_url,
&realm.local_path,
realm.last_sync.map(|dt| dt.to_rfc3339()),
realm.status.as_str(),
],
)?;
Ok(())
}
/// Remove a realm
pub fn remove_realm(&self, name: &str) -> Result<(), DaemonDbError> {
self.conn
.execute("DELETE FROM realms WHERE name = ?", [name])?;
Ok(())
}
// ─── Session Operations ─────────────────────────────────────────────────
/// List all active sessions
pub fn list_sessions(&self) -> Result<Vec<Session>, DaemonDbError> {
let mut stmt = self.conn.prepare(
r#"
SELECT id, repo, realm, client_id, started_at, last_activity,
active_rfc, active_domains, exports_modified, imports_watching
FROM sessions
"#,
)?;
let sessions = stmt
.query_map([], |row| Self::row_to_session(row))?
.collect::<Result<Vec<_>, _>>()?;
Ok(sessions)
}
/// List sessions for a specific realm
pub fn list_sessions_for_realm(&self, realm: &str) -> Result<Vec<Session>, DaemonDbError> {
let mut stmt = self.conn.prepare(
r#"
SELECT id, repo, realm, client_id, started_at, last_activity,
active_rfc, active_domains, exports_modified, imports_watching
FROM sessions WHERE realm = ?
"#,
)?;
let sessions = stmt
.query_map([realm], |row| Self::row_to_session(row))?
.collect::<Result<Vec<_>, _>>()?;
Ok(sessions)
}
/// Get a specific session
pub fn get_session(&self, id: &str) -> Result<Option<Session>, DaemonDbError> {
let mut stmt = self.conn.prepare(
r#"
SELECT id, repo, realm, client_id, started_at, last_activity,
active_rfc, active_domains, exports_modified, imports_watching
FROM sessions WHERE id = ?
"#,
)?;
let session = stmt
.query_row([id], |row| Self::row_to_session(row))
.optional()?;
Ok(session)
}
/// Register a new session
pub fn create_session(&self, session: &Session) -> Result<(), DaemonDbError> {
self.conn.execute(
r#"
INSERT INTO sessions (id, repo, realm, client_id, started_at, last_activity,
active_rfc, active_domains, exports_modified, imports_watching)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)
"#,
params![
&session.id,
&session.repo,
&session.realm,
&session.client_id,
session.started_at.to_rfc3339(),
session.last_activity.to_rfc3339(),
&session.active_rfc,
serde_json::to_string(&session.active_domains).unwrap_or_default(),
serde_json::to_string(&session.exports_modified).unwrap_or_default(),
serde_json::to_string(&session.imports_watching).unwrap_or_default(),
],
)?;
Ok(())
}
/// Update session activity timestamp
pub fn touch_session(&self, id: &str) -> Result<(), DaemonDbError> {
let now = Utc::now().to_rfc3339();
self.conn.execute(
"UPDATE sessions SET last_activity = ? WHERE id = ?",
params![now, id],
)?;
Ok(())
}
/// Remove a session
pub fn remove_session(&self, id: &str) -> Result<(), DaemonDbError> {
self.conn
.execute("DELETE FROM sessions WHERE id = ?", [id])?;
Ok(())
}
fn row_to_session(row: &rusqlite::Row) -> Result<Session, rusqlite::Error> {
Ok(Session {
id: row.get(0)?,
repo: row.get(1)?,
realm: row.get(2)?,
client_id: row.get(3)?,
started_at: DateTime::parse_from_rfc3339(&row.get::<_, String>(4)?)
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(|_| Utc::now()),
last_activity: DateTime::parse_from_rfc3339(&row.get::<_, String>(5)?)
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(|_| Utc::now()),
active_rfc: row.get(6)?,
active_domains: serde_json::from_str(&row.get::<_, String>(7)?).unwrap_or_default(),
exports_modified: serde_json::from_str(&row.get::<_, String>(8)?).unwrap_or_default(),
imports_watching: serde_json::from_str(&row.get::<_, String>(9)?).unwrap_or_default(),
})
}
// ─── Notification Operations ────────────────────────────────────────────
/// List pending notifications (not fully acknowledged)
pub fn list_notifications(&self) -> Result<Vec<Notification>, DaemonDbError> {
let mut stmt = self.conn.prepare(
r#"
SELECT id, realm, domain, contract, from_repo, change_type,
changes, created_at, acknowledged_by
FROM notifications
ORDER BY created_at DESC
"#,
)?;
let notifications = stmt
.query_map([], |row| Self::row_to_notification(row))?
.collect::<Result<Vec<_>, _>>()?;
Ok(notifications)
}
/// List notifications for a specific realm
pub fn list_notifications_for_realm(
&self,
realm: &str,
) -> Result<Vec<Notification>, DaemonDbError> {
let mut stmt = self.conn.prepare(
r#"
SELECT id, realm, domain, contract, from_repo, change_type,
changes, created_at, acknowledged_by
FROM notifications WHERE realm = ?
ORDER BY created_at DESC
"#,
)?;
let notifications = stmt
.query_map([realm], |row| Self::row_to_notification(row))?
.collect::<Result<Vec<_>, _>>()?;
Ok(notifications)
}
/// Create a notification
pub fn create_notification(
&self,
realm: &str,
domain: &str,
contract: &str,
from_repo: &str,
change_type: ChangeType,
changes: Option<serde_json::Value>,
) -> Result<i64, DaemonDbError> {
let now = Utc::now().to_rfc3339();
self.conn.execute(
r#"
INSERT INTO notifications (realm, domain, contract, from_repo, change_type, changes, created_at)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)
"#,
params![
realm,
domain,
contract,
from_repo,
change_type.as_str(),
changes.map(|v| v.to_string()),
now,
],
)?;
Ok(self.conn.last_insert_rowid())
}
/// Acknowledge a notification from a repo
pub fn acknowledge_notification(&self, id: i64, repo: &str) -> Result<(), DaemonDbError> {
// Get current acknowledged_by list
let mut stmt = self
.conn
.prepare("SELECT acknowledged_by FROM notifications WHERE id = ?")?;
let ack_json: String = stmt.query_row([id], |row| row.get(0))?;
let mut ack_list: Vec<String> = serde_json::from_str(&ack_json).unwrap_or_default();
// Add repo if not already acknowledged
if !ack_list.contains(&repo.to_string()) {
ack_list.push(repo.to_string());
self.conn.execute(
"UPDATE notifications SET acknowledged_by = ? WHERE id = ?",
params![serde_json::to_string(&ack_list).unwrap_or_default(), id],
)?;
}
Ok(())
}
fn row_to_notification(row: &rusqlite::Row) -> Result<Notification, rusqlite::Error> {
Ok(Notification {
id: row.get(0)?,
realm: row.get(1)?,
domain: row.get(2)?,
contract: row.get(3)?,
from_repo: row.get(4)?,
change_type: ChangeType::from_str(&row.get::<_, String>(5)?),
changes: row
.get::<_, Option<String>>(6)?
.and_then(|s| serde_json::from_str(&s).ok()),
created_at: DateTime::parse_from_rfc3339(&row.get::<_, String>(7)?)
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(|_| Utc::now()),
acknowledged_by: serde_json::from_str(&row.get::<_, String>(8)?).unwrap_or_default(),
})
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Insert → list → get → remove round-trip for realms.
    #[test]
    fn test_realm_crud() {
        let db = DaemonDb::open_memory().unwrap();
        let realm = Realm {
            name: "test-realm".to_string(),
            forgejo_url: "https://git.example.com/realms/test".to_string(),
            local_path: "/home/user/.blue/realms/test-realm".to_string(),
            last_sync: None,
            status: RealmStatus::Active,
        };
        db.upsert_realm(&realm).unwrap();
        let realms = db.list_realms().unwrap();
        assert_eq!(realms.len(), 1);
        assert_eq!(realms[0].name, "test-realm");
        let fetched = db.get_realm("test-realm").unwrap();
        assert!(fetched.is_some());
        assert_eq!(fetched.unwrap().forgejo_url, realm.forgejo_url);
        db.remove_realm("test-realm").unwrap();
        let realms = db.list_realms().unwrap();
        assert!(realms.is_empty());
    }
    /// Create → list → touch → remove round-trip for sessions.
    #[test]
    fn test_session_crud() {
        let db = DaemonDb::open_memory().unwrap();
        let session = Session {
            id: "sess-123".to_string(),
            repo: "aperture".to_string(),
            realm: "letemcook".to_string(),
            client_id: Some("cli-456".to_string()),
            started_at: Utc::now(),
            last_activity: Utc::now(),
            active_rfc: Some("training-metrics".to_string()),
            active_domains: vec!["s3-access".to_string()],
            exports_modified: vec![],
            imports_watching: vec!["s3-permissions".to_string()],
        };
        db.create_session(&session).unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].repo, "aperture");
        db.touch_session("sess-123").unwrap();
        db.remove_session("sess-123").unwrap();
        let sessions = db.list_sessions().unwrap();
        assert!(sessions.is_empty());
    }
    /// Create and acknowledge a notification; the acking repo must appear
    /// in `acknowledged_by` afterwards.
    #[test]
    fn test_notification_crud() {
        let db = DaemonDb::open_memory().unwrap();
        let id = db
            .create_notification(
                "letemcook",
                "s3-access",
                "s3-permissions",
                "aperture",
                ChangeType::Updated,
                Some(serde_json::json!({"added": ["training-metrics/*"]})),
            )
            .unwrap();
        let notifications = db.list_notifications().unwrap();
        assert_eq!(notifications.len(), 1);
        assert_eq!(notifications[0].contract, "s3-permissions");
        db.acknowledge_notification(id, "fungal").unwrap();
        let notifications = db.list_notifications().unwrap();
        assert!(notifications[0].acknowledged_by.contains(&"fungal".to_string()));
    }
}

View file

@ -0,0 +1,27 @@
//! Blue Daemon - Per-machine service for realm coordination
//!
//! The daemon manages:
//! - Realm state and git operations
//! - Session tracking
//! - Notifications between repos
//!
//! Architecture:
//! - HTTP server on localhost:7865
//! - SQLite database at ~/.blue/daemon.db
//! - Realm repos cloned to ~/.blue/realms/
mod client;
mod db;
mod paths;
mod server;
pub use client::{ClientError, CreateSessionRequest, DaemonClient, HealthResponse, SyncResponse};
pub use db::{DaemonDb, DaemonDbError, Notification, Realm, RealmStatus, Session};
pub use paths::{DaemonPaths, PathError};
pub use server::{run_daemon, DaemonState};
/// Default port for the daemon HTTP server
/// (the server binds to 127.0.0.1 only — see `run_daemon`).
pub const DAEMON_PORT: u16 = 7865;
/// Daemon version for API compatibility checks
/// (taken from the crate version at compile time).
pub const DAEMON_VERSION: &str = env!("CARGO_PKG_VERSION");

View file

@ -0,0 +1,126 @@
//! Daemon filesystem paths
//!
//! Handles platform-specific paths for daemon state.
use std::path::PathBuf;
use thiserror::Error;
/// Errors resolving or creating daemon filesystem paths.
#[derive(Debug, Error)]
pub enum PathError {
    /// `dirs::home_dir()` returned `None`.
    #[error("Could not determine home directory")]
    NoHomeDir,
    /// No XDG runtime dir, macOS home, or cache dir could be found.
    #[error("Could not determine runtime directory")]
    NoRuntimeDir,
    /// A directory could not be created on disk.
    #[error("Failed to create directory {path}: {source}")]
    CreateDir {
        path: PathBuf,
        source: std::io::Error,
    },
}
/// Paths used by the Blue daemon
///
/// All paths are derived from the user's home directory (plus a
/// platform-specific runtime dir); see [`DaemonPaths::new`].
#[derive(Debug, Clone)]
pub struct DaemonPaths {
    /// Base directory: ~/.blue/
    pub base: PathBuf,
    /// Database file: ~/.blue/daemon.db
    pub database: PathBuf,
    /// Realm clones directory: ~/.blue/realms/
    pub realms: PathBuf,
    /// Runtime directory for PID file
    /// macOS: /var/run/blue/ or ~/Library/Caches/blue/
    /// Linux: $XDG_RUNTIME_DIR/blue/ or /tmp/blue-{uid}/
    pub runtime: PathBuf,
    /// PID file path
    pub pid_file: PathBuf,
}
impl DaemonPaths {
    /// Compute all daemon paths.
    ///
    /// Note: this only *derives* the paths; no directories are created
    /// here — call [`DaemonPaths::ensure_dirs`] to create them on disk.
    pub fn new() -> Result<Self, PathError> {
        let home = dirs::home_dir().ok_or(PathError::NoHomeDir)?;
        let base = home.join(".blue");
        let database = base.join("daemon.db");
        let realms = base.join("realms");
        // Runtime directory varies by platform
        let runtime = Self::runtime_dir()?;
        let pid_file = runtime.join("blue.pid");
        let paths = Self {
            base,
            database,
            realms,
            runtime,
            pid_file,
        };
        Ok(paths)
    }
    /// Ensure all directories exist, creating any that are missing.
    pub fn ensure_dirs(&self) -> Result<(), PathError> {
        for dir in [&self.base, &self.realms, &self.runtime] {
            if !dir.exists() {
                std::fs::create_dir_all(dir).map_err(|e| PathError::CreateDir {
                    path: dir.clone(),
                    source: e,
                })?;
            }
        }
        Ok(())
    }
    /// Get the path for a specific realm's clone (`~/.blue/realms/<name>`).
    pub fn realm_path(&self, realm_name: &str) -> PathBuf {
        self.realms.join(realm_name)
    }
    /// Determine the runtime directory based on platform
    ///
    /// Precedence: $XDG_RUNTIME_DIR (even on macOS, if set), then the
    /// macOS cache location, then the generic cache dir.
    fn runtime_dir() -> Result<PathBuf, PathError> {
        // Try XDG_RUNTIME_DIR first (Linux)
        if let Ok(xdg) = std::env::var("XDG_RUNTIME_DIR") {
            return Ok(PathBuf::from(xdg).join("blue"));
        }
        // macOS: Use ~/Library/Caches/blue for runtime
        #[cfg(target_os = "macos")]
        {
            if let Some(home) = dirs::home_dir() {
                return Ok(home.join("Library/Caches/blue"));
            }
        }
        // Fallback: Use cache directory
        if let Some(cache) = dirs::cache_dir() {
            return Ok(cache.join("blue"));
        }
        Err(PathError::NoRuntimeDir)
    }
}
impl Default for DaemonPaths {
    /// Panics if the home/runtime directory cannot be determined; prefer
    /// [`DaemonPaths::new`] when that error must be handled.
    fn default() -> Self {
        Self::new().expect("Failed to determine daemon paths")
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Path derivation should end in the expected well-known components.
    /// (Requires a resolvable home directory on the test machine.)
    #[test]
    fn test_paths_creation() {
        let paths = DaemonPaths::new().unwrap();
        assert!(paths.base.ends_with(".blue"));
        assert!(paths.database.ends_with("daemon.db"));
        assert!(paths.realms.ends_with("realms"));
    }
}

View file

@ -0,0 +1,298 @@
//! Daemon HTTP server
//!
//! Runs on localhost:7865 and provides the API for CLI and GUI clients.
use axum::{
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
routing::{delete, get, post},
Json, Router,
};
use serde::{Deserialize, Serialize};
use std::sync::{Arc, Mutex};
use tower_http::trace::TraceLayer;
use tracing::{info, warn};
use super::db::{DaemonDb, Notification, Realm, Session};
use super::paths::DaemonPaths;
use super::DAEMON_PORT;
/// Shared daemon state
pub struct DaemonState {
    /// Database wrapped in Mutex (rusqlite::Connection is not Sync)
    pub db: Mutex<DaemonDb>,
    /// Filesystem locations (database, realm clones, PID file).
    pub paths: DaemonPaths,
}
impl DaemonState {
    /// Bundle an opened database and resolved paths into shared state.
    pub fn new(db: DaemonDb, paths: DaemonPaths) -> Self {
        Self {
            db: Mutex::new(db),
            paths,
        }
    }
}
/// Handler-facing alias: state is shared across requests via `Arc`.
type AppState = Arc<DaemonState>;
/// Run the daemon HTTP server
///
/// Binds to loopback (127.0.0.1) on [`DAEMON_PORT`] and serves until the
/// server exits; errors binding or serving are propagated to the caller.
pub async fn run_daemon(state: DaemonState) -> anyhow::Result<()> {
    let state = Arc::new(state);
    let app = create_router(state);
    let addr = format!("127.0.0.1:{}", DAEMON_PORT);
    info!("Blue daemon starting on {}", addr);
    let listener = tokio::net::TcpListener::bind(&addr).await?;
    axum::serve(listener, app).await?;
    Ok(())
}
/// Build the router with all daemon routes.
///
/// Path parameters use axum 0.8's `{name}` capture syntax. Every request
/// is traced via `TraceLayer`.
fn create_router(state: AppState) -> Router {
    Router::new()
        // Health check
        .route("/health", get(health))
        // Realms
        .route("/realms", get(list_realms))
        .route("/realms/{name}", get(get_realm))
        .route("/realms/{name}/sync", post(sync_realm))
        // Sessions
        .route("/sessions", get(list_sessions).post(create_session))
        .route("/sessions/{id}", delete(remove_session))
        // Notifications
        .route("/notifications", get(list_notifications))
        .route("/notifications/{id}/ack", post(acknowledge_notification))
        .layer(TraceLayer::new_for_http())
        .with_state(state)
}
// ─── Health ─────────────────────────────────────────────────────────────────
/// Body of `GET /health`.
#[derive(Serialize)]
struct HealthResponse {
    // Always "ok" — reachability itself is the signal.
    status: &'static str,
    // Crate version, for client/daemon compatibility checks.
    version: &'static str,
}
/// `GET /health` — liveness probe; always succeeds when the server is up.
async fn health() -> Json<HealthResponse> {
    Json(HealthResponse {
        status: "ok",
        version: super::DAEMON_VERSION,
    })
}
// ─── Realms ─────────────────────────────────────────────────────────────────
/// `GET /realms` — all tracked realms.
async fn list_realms(State(state): State<AppState>) -> Result<Json<Vec<Realm>>, AppError> {
    // Hold the DB lock only for the duration of the query.
    let realms = {
        let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
        db.list_realms()?
    };
    Ok(Json(realms))
}
/// `GET /realms/{name}` — fetch a single realm, or 404 when unknown.
async fn get_realm(
    State(state): State<AppState>,
    Path(name): Path<String>,
) -> Result<Json<Realm>, AppError> {
    let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
    let realm = db.get_realm(&name)?.ok_or(AppError::NotFound)?;
    Ok(Json(realm))
}
/// Optional body of `POST /realms/{name}/sync`.
#[derive(Deserialize, Default)]
struct SyncRealmRequest {
    /// Force a sync regardless of current state.
    // NOTE(review): currently only logged — see the TODO in `sync_realm`.
    #[serde(default)]
    force: bool,
}
/// Response of `POST /realms/{name}/sync`.
#[derive(Serialize)]
struct SyncRealmResponse {
    status: &'static str,
    message: String,
}
/// `POST /realms/{name}/sync` — request a sync of a realm's clone.
///
/// Returns 404 when the realm is unknown. The body is optional; a missing
/// body is treated as the default request. The actual git sync is not yet
/// implemented — only the request is logged.
async fn sync_realm(
    State(state): State<AppState>,
    Path(name): Path<String>,
    body: Option<Json<SyncRealmRequest>>,
) -> Result<Json<SyncRealmResponse>, AppError> {
    let req = body.map(|b| b.0).unwrap_or_default();
    // Scope the lock to the realm lookup only.
    let realm = {
        let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
        db.get_realm(&name)?.ok_or(AppError::NotFound)?
    };
    // TODO: Implement actual git sync via git2
    info!(
        realm = %name,
        force = req.force,
        "Sync requested for realm"
    );
    Ok(Json(SyncRealmResponse {
        status: "ok",
        message: format!("Sync initiated for realm '{}'", realm.name),
    }))
}
// ─── Sessions ───────────────────────────────────────────────────────────────
/// `GET /sessions` — all active sessions across realms.
async fn list_sessions(State(state): State<AppState>) -> Result<Json<Vec<Session>>, AppError> {
    // Hold the DB lock only for the duration of the query.
    let sessions = {
        let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
        db.list_sessions()?
    };
    Ok(Json(sessions))
}
/// Body of `POST /sessions`.
///
/// `id`, `repo`, and `realm` are required; the list fields default to
/// empty when omitted. Timestamps are NOT accepted from the client —
/// they are set server-side in `create_session`.
#[derive(Deserialize)]
struct CreateSessionRequest {
    id: String,
    repo: String,
    realm: String,
    client_id: Option<String>,
    active_rfc: Option<String>,
    #[serde(default)]
    active_domains: Vec<String>,
    #[serde(default)]
    exports_modified: Vec<String>,
    #[serde(default)]
    imports_watching: Vec<String>,
}
/// `POST /sessions` — register a new session.
///
/// Sets `started_at`/`last_activity` server-side and returns 201 with the
/// stored session.
// NOTE(review): a duplicate session id violates the `id` PRIMARY KEY and
// currently surfaces as a 500 (Database error), not a 409 — confirm
// intended behavior.
async fn create_session(
    State(state): State<AppState>,
    Json(req): Json<CreateSessionRequest>,
) -> Result<(StatusCode, Json<Session>), AppError> {
    let now = chrono::Utc::now();
    let session = Session {
        id: req.id,
        repo: req.repo,
        realm: req.realm,
        client_id: req.client_id,
        started_at: now,
        last_activity: now,
        active_rfc: req.active_rfc,
        active_domains: req.active_domains,
        exports_modified: req.exports_modified,
        imports_watching: req.imports_watching,
    };
    // Scope the lock so it is released before logging and responding.
    {
        let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
        db.create_session(&session)?;
    }
    info!(session_id = %session.id, repo = %session.repo, "Session registered");
    Ok((StatusCode::CREATED, Json(session)))
}
/// `DELETE /sessions/{id}` — deregister a session.
///
/// Idempotent: the DELETE is unconditional, so an unknown id still
/// yields 204.
async fn remove_session(
    State(state): State<AppState>,
    Path(id): Path<String>,
) -> Result<StatusCode, AppError> {
    let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
    db.remove_session(&id)?;
    info!(session_id = %id, "Session deregistered");
    Ok(StatusCode::NO_CONTENT)
}
// ─── Notifications ──────────────────────────────────────────────────────────
/// `GET /notifications` — all notifications, newest first.
async fn list_notifications(
    State(state): State<AppState>,
) -> Result<Json<Vec<Notification>>, AppError> {
    // Hold the DB lock only for the duration of the query.
    let notifications = {
        let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
        db.list_notifications()?
    };
    Ok(Json(notifications))
}
/// Body of `POST /notifications/{id}/ack`.
#[derive(Deserialize)]
struct AcknowledgeRequest {
    /// Repo doing the acknowledging.
    repo: String,
}
/// `POST /notifications/{id}/ack` — record a repo's acknowledgement.
// NOTE(review): an unknown notification id makes the DB layer return a
// `QueryReturnedNoRows` SQLite error, which maps to a 500 here rather
// than a 404 — confirm whether that is intended.
async fn acknowledge_notification(
    State(state): State<AppState>,
    Path(id): Path<i64>,
    Json(req): Json<AcknowledgeRequest>,
) -> Result<StatusCode, AppError> {
    let db = state.db.lock().map_err(|_| AppError::LockPoisoned)?;
    db.acknowledge_notification(id, &req.repo)?;
    info!(notification_id = id, repo = %req.repo, "Notification acknowledged");
    Ok(StatusCode::OK)
}
// ─── Error Handling ─────────────────────────────────────────────────────────
/// Handler-level errors, mapped to HTTP responses by `IntoResponse`.
#[derive(Debug)]
enum AppError {
    /// Resource does not exist → 404.
    NotFound,
    /// Any database failure → 500.
    Database(super::db::DaemonDbError),
    /// The state mutex was poisoned by a panicking holder → 500.
    LockPoisoned,
}
impl From<super::db::DaemonDbError> for AppError {
    // Lets handlers use `?` directly on DaemonDb calls.
    fn from(err: super::db::DaemonDbError) -> Self {
        AppError::Database(err)
    }
}
impl IntoResponse for AppError {
    /// Serialize every error as a JSON `{ "error": ... }` body.
    ///
    /// Database error details are echoed to the client; the lock-poisoned
    /// case deliberately returns a generic message instead.
    // NOTE(review): exposing DB error strings is presumably acceptable for
    // a loopback-only daemon — confirm.
    fn into_response(self) -> axum::response::Response {
        #[derive(Serialize)]
        struct ErrorResponse {
            error: String,
        }
        let (status, message) = match self {
            AppError::NotFound => (StatusCode::NOT_FOUND, "Not found".to_string()),
            AppError::Database(err) => {
                warn!("Database error: {}", err);
                (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
            }
            AppError::LockPoisoned => {
                warn!("Lock poisoned");
                (StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
            }
        };
        (status, Json(ErrorResponse { error: message })).into_response()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;
    use axum::http::Request;
    use tower::ServiceExt;
    // Fresh in-memory state per test; `oneshot` drives the router without
    // binding a real socket.
    fn test_state() -> AppState {
        let db = DaemonDb::open_memory().unwrap();
        let paths = DaemonPaths::new().unwrap();
        Arc::new(DaemonState::new(db, paths))
    }
    /// /health always answers 200.
    #[tokio::test]
    async fn test_health() {
        let app = create_router(test_state());
        let response = app
            .oneshot(Request::builder().uri("/health").body(Body::empty()).unwrap())
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
    }
    /// /realms on an empty database is a 200 (empty list), not an error.
    #[tokio::test]
    async fn test_list_realms_empty() {
        let app = create_router(test_state());
        let response = app
            .oneshot(Request::builder().uri("/realms").body(Body::empty()).unwrap())
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
    }
}

View file

@ -8,11 +8,14 @@
//! - Git worktree operations
//! - Project state management
//! - Blue's voice and tone
//! - Daemon for cross-repo coordination
// Blue's true name, between friends
// (leading underscore keeps the unused-constant lint quiet)
const _BLUE_SECRET_NAME: &str = "Sheepey"; // pronounced "Shee-paay"
pub mod daemon;
pub mod documents;
pub mod realm;
pub mod repo;
pub mod state;
pub mod store;

View file

@ -0,0 +1,246 @@
//! Realm configuration (realm.yaml)
//!
//! Defines the top-level realm configuration including governance and trust settings.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::Path;
use super::RealmError;
/// Top-level realm configuration stored in realm.yaml
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RealmConfig {
    /// Realm name (unique identifier)
    pub name: String,
    /// Realm version (semver)
    pub version: String,
    /// When the realm was created
    /// (defaults to "now" when absent from the YAML, via serde default).
    #[serde(default = "Utc::now")]
    pub created_at: DateTime<Utc>,
    /// Governance settings
    #[serde(default)]
    pub governance: Governance,
    /// Trust settings
    #[serde(default)]
    pub trust: TrustConfig,
}
impl RealmConfig {
    /// Create a new realm with defaults (version "1.0.0", created "now").
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            version: "1.0.0".to_string(),
            created_at: Utc::now(),
            governance: Governance::default(),
            trust: TrustConfig::default(),
        }
    }
    /// Load from a YAML file.
    ///
    /// I/O failures map to `RealmError::ReadFile`; YAML parse failures
    /// propagate through the `?` conversion on `serde_yaml::Error`.
    pub fn load(path: &Path) -> Result<Self, RealmError> {
        let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
            path: path.display().to_string(),
            source: e,
        })?;
        let config: Self = serde_yaml::from_str(&content)?;
        Ok(config)
    }
    /// Save to a YAML file (overwrites any existing content).
    pub fn save(&self, path: &Path) -> Result<(), RealmError> {
        let content = serde_yaml::to_string(self)?;
        std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
            path: path.display().to_string(),
            source: e,
        })?;
        Ok(())
    }
    /// Validate the configuration.
    // NOTE(review): only the version's semver syntax is checked; the name
    // is not validated here — confirm whether that is enforced elsewhere.
    pub fn validate(&self) -> Result<(), RealmError> {
        // Validate version is valid semver
        semver::Version::parse(&self.version)?;
        Ok(())
    }
}
/// Governance settings for the realm
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Governance {
/// How new repos can join
#[serde(default)]
pub admission: AdmissionPolicy,
/// Who can approve new repos (email addresses)
#[serde(default)]
pub approvers: Vec<String>,
/// Policy for breaking changes
#[serde(default)]
pub breaking_changes: BreakingChangePolicy,
}
impl Default for Governance {
fn default() -> Self {
Self {
admission: AdmissionPolicy::Approval,
approvers: Vec::new(),
breaking_changes: BreakingChangePolicy::default(),
}
}
}
/// How new repos can join the realm
///
/// Serialized in kebab-case: "open", "approval", "invite-only".
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
pub enum AdmissionPolicy {
    /// Anyone can join
    Open,
    /// Requires approval from an approver
    #[default]
    Approval,
    /// Only explicitly invited repos can join
    InviteOnly,
}
/// Policy for breaking changes to contracts
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BreakingChangePolicy {
    /// Whether breaking changes require approval
    #[serde(default = "default_true")]
    pub require_approval: bool,
    /// Grace period in days before breaking changes take effect
    #[serde(default = "default_grace_period")]
    pub grace_period_days: u32,
}
impl Default for BreakingChangePolicy {
    // Kept manual because the derived Default (false, 0) would not match
    // the intended defaults below.
    fn default() -> Self {
        Self {
            require_approval: true,
            grace_period_days: 14,
        }
    }
}
// Serde field-default helpers; keep these in sync with `Default` above so
// omitted YAML fields and a fully-defaulted struct agree.
fn default_true() -> bool {
    true
}
fn default_grace_period() -> u32 {
    14
}
/// Trust configuration for the realm
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrustConfig {
    /// Trust model
    #[serde(default)]
    pub mode: TrustMode,
    /// Whether commits must be signed
    #[serde(default)]
    pub require_signed_commits: bool,
    /// File path patterns to permission groups
    /// e.g., "repos/{repo}.yaml" -> ["repo_maintainers"]
    ///
    /// Patterns use `{placeholder}` segments matching the realm repo layout.
    #[serde(default)]
    pub permissions: HashMap<String, Vec<String>>,
}
impl Default for TrustConfig {
    /// Collaborative mode, unsigned commits allowed, and a baseline
    /// path-pattern → permission-group mapping mirroring the realm layout.
    fn default() -> Self {
        let default_permissions = [
            ("repos/{repo}.yaml", "repo_maintainers"),
            ("domains/{domain}/domain.yaml", "domain_owners"),
            ("domains/{domain}/contracts/{name}.yaml", "contract_owner"),
            ("domains/{domain}/bindings/{repo}.yaml", "repo_maintainers"),
        ];
        let permissions = default_permissions
            .iter()
            .map(|(pattern, group)| (pattern.to_string(), vec![group.to_string()]))
            .collect::<HashMap<_, _>>();
        Self {
            mode: TrustMode::Collaborative,
            require_signed_commits: false,
            permissions,
        }
    }
}
/// Trust model for the realm
///
/// Serialized in kebab-case: "collaborative", "vendor-customer",
/// "federation".
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
pub enum TrustMode {
    /// All participants are equal peers
    #[default]
    Collaborative,
    /// One party provides, others consume
    VendorCustomer,
    /// Loose coordination between independent parties
    Federation,
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Constructor defaults are valid and as documented.
    #[test]
    fn test_realm_config_new() {
        let config = RealmConfig::new("test-realm");
        assert_eq!(config.name, "test-realm");
        assert_eq!(config.version, "1.0.0");
        assert!(config.validate().is_ok());
    }
    /// Serialize → deserialize keeps the identifying fields intact.
    #[test]
    fn test_realm_config_yaml_roundtrip() {
        let config = RealmConfig::new("letemcook");
        let yaml = serde_yaml::to_string(&config).unwrap();
        let parsed: RealmConfig = serde_yaml::from_str(&yaml).unwrap();
        assert_eq!(parsed.name, config.name);
    }
    /// Governance defaults: approval-gated admission, 14-day grace period.
    #[test]
    fn test_governance_defaults() {
        let gov = Governance::default();
        assert_eq!(gov.admission, AdmissionPolicy::Approval);
        assert!(gov.breaking_changes.require_approval);
        assert_eq!(gov.breaking_changes.grace_period_days, 14);
    }
    /// Trust defaults: collaborative mode with the baseline permission map.
    #[test]
    fn test_trust_config_defaults() {
        let trust = TrustConfig::default();
        assert_eq!(trust.mode, TrustMode::Collaborative);
        assert!(!trust.require_signed_commits);
        assert!(trust.permissions.contains_key("repos/{repo}.yaml"));
    }
    /// Kebab-case wire form maps onto the enum variant.
    #[test]
    fn test_admission_policy_serde() {
        let yaml = "invite-only";
        let policy: AdmissionPolicy = serde_yaml::from_str(yaml).unwrap();
        assert_eq!(policy, AdmissionPolicy::InviteOnly);
    }
}

View file

@ -0,0 +1,241 @@
//! Contract definitions for cross-repo coordination
//!
//! Contracts define the schema and values shared between repos.
use serde::{Deserialize, Serialize};
use std::path::Path;
use super::RealmError;
/// A contract defining shared data between repos
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Contract {
    /// Contract name (unique within domain)
    pub name: String,
    /// Contract version (semver)
    pub version: String,
    /// Which repo owns this contract (only owner can modify)
    pub owner: String,
    /// Compatibility settings
    #[serde(default)]
    pub compatibility: Compatibility,
    /// JSON Schema for the contract value
    // NOTE(review): stored as raw JSON; nothing in this module validates
    // `value` against it — confirm where enforcement happens.
    pub schema: serde_json::Value,
    /// The actual contract value
    pub value: ContractValue,
    /// Validation configuration
    #[serde(default)]
    pub validation: Option<ValidationConfig>,
    /// Evolution history
    /// (one entry per released version; see `Contract::validate`).
    #[serde(default)]
    pub evolution: Vec<EvolutionEntry>,
}
impl Contract {
    /// Create a new contract at version 1.0.0 with a permissive
    /// `{"type": "object"}` schema and a single evolution entry.
    pub fn new(name: impl Into<String>, owner: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            version: "1.0.0".to_string(),
            owner: owner.into(),
            compatibility: Compatibility::default(),
            schema: serde_json::json!({
                "type": "object"
            }),
            value: ContractValue::default(),
            validation: None,
            evolution: vec![EvolutionEntry {
                version: "1.0.0".to_string(),
                changes: vec!["Initial version".to_string()],
                compatible: true,
            }],
        }
    }
    /// Load from a YAML file.
    pub fn load(path: &Path) -> Result<Self, RealmError> {
        let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
            path: path.display().to_string(),
            source: e,
        })?;
        let contract: Self = serde_yaml::from_str(&content)?;
        Ok(contract)
    }
    /// Save to a YAML file (overwrites any existing content).
    pub fn save(&self, path: &Path) -> Result<(), RealmError> {
        let content = serde_yaml::to_string(self)?;
        std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
            path: path.display().to_string(),
            source: e,
        })?;
        Ok(())
    }
    /// Validate the contract: version and all evolution versions must be
    /// syntactically valid semver. The value is NOT checked against `schema`.
    pub fn validate(&self) -> Result<(), RealmError> {
        // Validate version is valid semver
        semver::Version::parse(&self.version)?;
        // Validate all evolution entries have valid versions
        for entry in &self.evolution {
            semver::Version::parse(&entry.version)?;
        }
        Ok(())
    }
    /// Check if this contract can be modified by a given repo
    /// (exact string match on the owning repo).
    pub fn can_modify(&self, repo: &str) -> bool {
        self.owner == repo
    }
    /// Check if a version upgrade is compatible.
    ///
    /// A major-version increase is always breaking; anything else is
    /// compatible iff `compatibility.backwards` is set.
    // NOTE(review): semver convention treats 0.x minor bumps as breaking,
    // and a *lower* `new_version` also falls into the "compatible iff
    // backwards" branch here — confirm both are intended.
    pub fn is_compatible_upgrade(&self, new_version: &str) -> Result<bool, RealmError> {
        let current = semver::Version::parse(&self.version)?;
        let new = semver::Version::parse(new_version)?;
        // Major version bump = breaking change
        if new.major > current.major {
            return Ok(false);
        }
        // Same major, any minor/patch = compatible if backwards compatible
        Ok(self.compatibility.backwards)
    }
}
/// The actual value of a contract
///
/// Built-in fields cover S3-style path permissions; any other keys in the
/// YAML land in `extra` via `#[serde(flatten)]`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ContractValue {
    /// Read paths (for S3-style contracts); omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub read: Vec<String>,
    /// Write paths; omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub write: Vec<String>,
    /// Delete paths; omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub delete: Vec<String>,
    /// Additional fields (for flexibility): unknown keys are captured here
    /// on deserialize and written back inline on serialize
    #[serde(flatten)]
    pub extra: serde_json::Map<String, serde_json::Value>,
}
/// Compatibility settings for a contract
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Compatibility {
    /// New version readable by old importers (defaults to true when the
    /// key is absent, via `default_true`)
    #[serde(default = "default_true")]
    pub backwards: bool,
    /// Old version readable by new importers (defaults to false)
    #[serde(default)]
    pub forwards: bool,
}
impl Default for Compatibility {
fn default() -> Self {
Self {
backwards: true,
forwards: false,
}
}
}
/// Serde default helper: `#[serde(default)]` alone yields `false` for
/// bools, so `backwards` and `compatible` use this to default to `true`.
fn default_true() -> bool {
    true
}
/// Validation configuration for a contract
///
/// Scripts are stored as plain strings; how and when they are invoked is
/// not visible here — presumably by the realm service/CI (TODO confirm).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ValidationConfig {
    /// Script to run on export (validates exporter's code matches contract)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub exporter: Option<String>,
    /// Script to run on import (validates importer's bindings)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub importer: Option<String>,
    /// Scripts that only run in CI; omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub ci_only: Vec<String>,
}
/// An entry in the contract evolution history
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvolutionEntry {
    /// Version this entry describes (semver string; checked by
    /// `Contract::validate`)
    pub version: String,
    /// What changed in this version
    pub changes: Vec<String>,
    /// Whether this version is compatible with the previous
    /// (defaults to true when the key is absent from YAML)
    #[serde(default = "default_true")]
    pub compatible: bool,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_contract_new() {
        let contract = Contract::new("s3-permissions", "aperture");
        assert_eq!(contract.version, "1.0.0");
        assert_eq!(contract.owner, "aperture");
        assert_eq!(contract.name, "s3-permissions");
        // Only the owning repo may modify a contract.
        assert!(contract.can_modify("aperture"));
        assert!(!contract.can_modify("fungal"));
    }

    #[test]
    fn test_contract_yaml_roundtrip() {
        // Serialize then deserialize; key fields must survive the trip.
        let mut original = Contract::new("s3-permissions", "aperture");
        original.value.read = vec!["jobs/*/masks/*".to_string()];
        original.value.write = vec!["jobs/*/*/manifest.json".to_string()];
        let serialized = serde_yaml::to_string(&original).unwrap();
        let restored: Contract = serde_yaml::from_str(&serialized).unwrap();
        assert_eq!(restored.name, original.name);
        assert_eq!(restored.value.read, original.value.read);
    }

    #[test]
    fn test_compatibility_check() {
        let contract = Contract::new("test", "owner");
        // Minor bump should be compatible
        assert!(contract.is_compatible_upgrade("1.1.0").unwrap());
        // Major bump should be incompatible
        assert!(!contract.is_compatible_upgrade("2.0.0").unwrap());
    }

    #[test]
    fn test_contract_value_extra_fields() {
        // Unknown keys must be captured by the flattened `extra` map.
        let yaml =
            "read:\n  - \"path/a\"\nwrite:\n  - \"path/b\"\ncustom_field: \"custom_value\"\n";
        let value: ContractValue = serde_yaml::from_str(yaml).unwrap();
        assert_eq!(value.read, vec!["path/a"]);
        assert!(value.extra.contains_key("custom_field"));
    }
}

View file

@ -0,0 +1,342 @@
//! Domain definitions for cross-repo coordination
//!
//! A domain is the coordination context between repos - the "edge" connecting nodes.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::path::Path;
use super::RealmError;
/// A domain is a coordination context between repos
///
/// Domains are the "edges" of the realm graph: they name a coordination
/// surface and list the participating repos.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Domain {
    /// Domain name (unique within realm)
    pub name: String,
    /// Human-readable description (empty string when unset)
    #[serde(default)]
    pub description: String,
    /// When the domain was created; defaults to "now" when the key is
    /// absent from the YAML (via `Utc::now`)
    #[serde(default = "Utc::now")]
    pub created_at: DateTime<Utc>,
    /// Member repos in this domain (repo names, as compared by `has_member`)
    #[serde(default)]
    pub members: Vec<String>,
}
impl Domain {
/// Create a new domain
pub fn new(name: impl Into<String>) -> Self {
Self {
name: name.into(),
description: String::new(),
created_at: Utc::now(),
members: Vec::new(),
}
}
/// Add a member repo
pub fn add_member(&mut self, repo: impl Into<String>) {
let repo = repo.into();
if !self.members.contains(&repo) {
self.members.push(repo);
}
}
/// Check if a repo is a member
pub fn has_member(&self, repo: &str) -> bool {
self.members.iter().any(|m| m == repo)
}
/// Load from a YAML file
pub fn load(path: &Path) -> Result<Self, RealmError> {
let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
path: path.display().to_string(),
source: e,
})?;
let domain: Self = serde_yaml::from_str(&content)?;
Ok(domain)
}
/// Save to a YAML file
pub fn save(&self, path: &Path) -> Result<(), RealmError> {
let content = serde_yaml::to_string(self)?;
std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
path: path.display().to_string(),
source: e,
})?;
Ok(())
}
}
/// A binding declares what a repo exports or imports in a domain
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Binding {
    /// Which repo this binding is for
    pub repo: String,
    /// Role in the domain (provider / consumer / both)
    pub role: BindingRole,
    /// What this repo exports; omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub exports: Vec<ExportBinding>,
    /// What this repo imports; omitted from YAML when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub imports: Vec<ImportBinding>,
}
impl Binding {
/// Create a new provider binding
pub fn provider(repo: impl Into<String>) -> Self {
Self {
repo: repo.into(),
role: BindingRole::Provider,
exports: Vec::new(),
imports: Vec::new(),
}
}
/// Create a new consumer binding
pub fn consumer(repo: impl Into<String>) -> Self {
Self {
repo: repo.into(),
role: BindingRole::Consumer,
exports: Vec::new(),
imports: Vec::new(),
}
}
/// Add an export
pub fn add_export(&mut self, export: ExportBinding) {
self.exports.push(export);
}
/// Add an import
pub fn add_import(&mut self, import: ImportBinding) {
self.imports.push(import);
}
/// Load from a YAML file
pub fn load(path: &Path) -> Result<Self, RealmError> {
let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
path: path.display().to_string(),
source: e,
})?;
let binding: Self = serde_yaml::from_str(&content)?;
Ok(binding)
}
/// Save to a YAML file
pub fn save(&self, path: &Path) -> Result<(), RealmError> {
let content = serde_yaml::to_string(self)?;
std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
path: path.display().to_string(),
source: e,
})?;
Ok(())
}
}
/// Role of a repo in a domain
///
/// Serialized in lowercase ("provider" / "consumer" / "both").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum BindingRole {
    /// Provides/exports data
    Provider,
    /// Consumes/imports data
    Consumer,
    /// Both provides and consumes
    Both,
}
/// An export declaration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportBinding {
    /// Which contract this exports (contract name within the domain)
    pub contract: String,
    /// Source files that define the exported values
    // NOTE(review): tests use repo-relative paths; confirm against
    // whatever consumes these during validation.
    #[serde(default)]
    pub source_files: Vec<String>,
}
impl ExportBinding {
/// Create a new export binding
pub fn new(contract: impl Into<String>) -> Self {
Self {
contract: contract.into(),
source_files: Vec::new(),
}
}
/// Add a source file
pub fn with_source(mut self, path: impl Into<String>) -> Self {
self.source_files.push(path.into());
self
}
}
/// An import declaration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportBinding {
    /// Which contract this imports
    pub contract: String,
    /// Semver version requirement (defaults to ">=1.0.0")
    #[serde(default = "default_version_req")]
    pub version: String,
    /// File that binds to this contract (empty string when unset)
    #[serde(default)]
    pub binding: String,
    /// Current status of this import (defaults to `Pending`)
    #[serde(default)]
    pub status: ImportStatus,
    /// Actually resolved version; set by `resolve`
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resolved_version: Option<String>,
    /// When the version was resolved; set alongside `resolved_version`
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resolved_at: Option<DateTime<Utc>>,
}
/// Serde default for `ImportBinding::version`: accept any version >= 1.0.0
/// until the author pins a tighter requirement.
fn default_version_req() -> String {
    ">=1.0.0".to_string()
}
impl ImportBinding {
    /// Create a new import binding for `contract` with the default version
    /// requirement (">=1.0.0"), no binding file, and `Pending` status.
    pub fn new(contract: impl Into<String>) -> Self {
        Self {
            contract: contract.into(),
            version: default_version_req(),
            binding: String::new(),
            status: ImportStatus::Pending,
            resolved_version: None,
            resolved_at: None,
        }
    }

    /// Builder-style: set the semver version requirement.
    pub fn with_version(mut self, version: impl Into<String>) -> Self {
        self.version = version.into();
        self
    }

    /// Builder-style: set the binding file.
    pub fn with_binding(mut self, binding: impl Into<String>) -> Self {
        self.binding = binding.into();
        self
    }

    /// Mark this import as resolved to a concrete version, stamping the
    /// resolution time and flipping status to `Current`.
    pub fn resolve(&mut self, version: impl Into<String>) {
        self.resolved_version = Some(version.into());
        self.resolved_at = Some(Utc::now());
        self.status = ImportStatus::Current;
    }

    /// Check whether a concrete `version` satisfies this import's
    /// version requirement.
    ///
    /// # Errors
    /// `RealmError::InvalidVersion` if either the stored requirement or
    /// `version` fails to parse.
    pub fn satisfies(&self, version: &str) -> Result<bool, RealmError> {
        // RealmError derives `From<semver::Error>`, so `?` converts to
        // `InvalidVersion` directly; the previous explicit
        // `map_err(|e| RealmError::InvalidVersion(e))` was a redundant
        // closure (clippy: redundant_closure).
        let req = semver::VersionReq::parse(&self.version)?;
        let ver = semver::Version::parse(version)?;
        Ok(req.matches(&ver))
    }
}
/// Status of an import binding
///
/// Serialized in lowercase; the serde default is `Pending`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum ImportStatus {
    /// Not yet resolved
    #[default]
    Pending,
    /// Resolved and up to date
    Current,
    /// A newer version is available
    Outdated,
    /// The imported contract was removed
    Broken,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_domain_new() {
        // Membership: deduplicated add, linear lookup.
        let mut domain = Domain::new("s3-access");
        domain.add_member("aperture");
        domain.add_member("fungal");
        assert_eq!(domain.name, "s3-access");
        assert!(domain.has_member("aperture"));
        assert!(domain.has_member("fungal"));
        assert!(!domain.has_member("ml-infra"));
    }

    #[test]
    fn test_binding_provider() {
        // Provider role carries export declarations.
        let mut binding = Binding::provider("aperture");
        binding.add_export(
            ExportBinding::new("s3-permissions")
                .with_source("models/training/s3_paths.py"),
        );
        assert_eq!(binding.role, BindingRole::Provider);
        assert_eq!(binding.exports.len(), 1);
        assert_eq!(binding.exports[0].contract, "s3-permissions");
    }

    #[test]
    fn test_binding_consumer() {
        // Consumer role carries import declarations with a version range.
        let mut binding = Binding::consumer("fungal");
        binding.add_import(
            ImportBinding::new("s3-permissions")
                .with_version(">=1.0.0, <2.0.0")
                .with_binding("cdk/training_tools_access_stack.py"),
        );
        assert_eq!(binding.role, BindingRole::Consumer);
        assert_eq!(binding.imports.len(), 1);
        assert_eq!(binding.imports[0].version, ">=1.0.0, <2.0.0");
    }

    #[test]
    fn test_import_satisfies() {
        // semver uses comma to separate version requirements
        let import = ImportBinding::new("test")
            .with_version(">=1.0.0, <2.0.0");
        assert!(import.satisfies("1.0.0").unwrap());
        assert!(import.satisfies("1.5.0").unwrap());
        // Both bounds are enforced: too new and too old are rejected.
        assert!(!import.satisfies("2.0.0").unwrap());
        assert!(!import.satisfies("0.9.0").unwrap());
    }

    #[test]
    fn test_binding_yaml_roundtrip() {
        // Round-trip through YAML must preserve repo and export count.
        let mut binding = Binding::provider("aperture");
        binding.add_export(ExportBinding::new("s3-permissions"));
        let yaml = serde_yaml::to_string(&binding).unwrap();
        let parsed: Binding = serde_yaml::from_str(&yaml).unwrap();
        assert_eq!(parsed.repo, binding.repo);
        assert_eq!(parsed.exports.len(), 1);
    }
}

View file

@ -0,0 +1,76 @@
//! Realm data model for cross-repo coordination
//!
//! This module defines the data structures for:
//! - Realms (groups of coordinated repos)
//! - Domains (coordination contexts between repos)
//! - Contracts (schemas defining shared data)
//! - Bindings (export/import declarations)
//!
//! See RFC 0001: Cross-Repo Coordination with Realms
mod config;
mod contract;
mod domain;
mod repo;
mod service;
pub use config::{
AdmissionPolicy, BreakingChangePolicy, Governance, RealmConfig, TrustConfig, TrustMode,
};
pub use contract::{
Compatibility, Contract, ContractValue, EvolutionEntry, ValidationConfig,
};
pub use domain::{Binding, BindingRole, Domain, ExportBinding, ImportBinding, ImportStatus};
pub use repo::{LocalRepoConfig, RealmRef, RepoConfig};
pub use service::{
CheckIssue, CheckIssueKind, CheckResult, DomainDetails, RealmDetails, RealmInfo,
RealmService, RealmSyncStatus, SyncResult, WorktreeInfo, WorktreePrStatus,
};
use thiserror::Error;
/// Errors produced by the realm data model and its services.
#[derive(Debug, Error)]
pub enum RealmError {
    /// I/O failure while reading a file; `path` is pre-rendered for display.
    #[error("Failed to read file {path}: {source}")]
    ReadFile {
        path: String,
        source: std::io::Error,
    },
    /// I/O failure while writing a file.
    #[error("Failed to write file {path}: {source}")]
    WriteFile {
        path: String,
        source: std::io::Error,
    },
    /// YAML (de)serialization failure (contracts, domains, configs).
    #[error("Failed to parse YAML: {0}")]
    YamlParse(#[from] serde_yaml::Error),
    /// JSON (de)serialization failure.
    #[error("Failed to parse JSON: {0}")]
    JsonParse(#[from] serde_json::Error),
    /// A version or version requirement failed semver parsing.
    #[error("Invalid semver version: {0}")]
    InvalidVersion(#[from] semver::Error),
    /// Lookup failed: no contract with the given name.
    #[error("Contract not found: {0}")]
    ContractNotFound(String),
    /// Lookup failed: no domain with the given name.
    #[error("Domain not found: {0}")]
    DomainNotFound(String),
    /// Lookup failed: no repo with the given name.
    #[error("Repo not found: {0}")]
    RepoNotFound(String),
    /// A validation check rejected its input; payload is the reason.
    #[error("Validation failed: {0}")]
    ValidationFailed(String),
    /// A repo other than the owner attempted to modify a contract.
    #[error("Ownership violation: {contract} is owned by {owner}, not {attempted}")]
    OwnershipViolation {
        contract: String,
        owner: String,
        attempted: String,
    },
    /// A cycle was detected; payload describes where.
    #[error("Cycle detected: {0}")]
    CycleDetected(String),
}

View file

@ -0,0 +1,209 @@
//! Repo registration for realms
//!
//! Defines how repos are registered in a realm.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::path::Path;
use super::RealmError;
/// Configuration for a repo registered in a realm
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RepoConfig {
    /// Repo name (unique within realm)
    pub name: String,
    /// Optional organization prefix; omitted from YAML when None
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub org: Option<String>,
    /// Local filesystem path (for development); omitted when None
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    /// Remote URL (for cloning); omitted when None
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    /// Maintainers (email addresses, matched exactly by `is_maintainer`)
    #[serde(default)]
    pub maintainers: Vec<String>,
    /// When the repo joined the realm; defaults to "now" when the key
    /// is absent from the YAML
    #[serde(default = "Utc::now")]
    pub joined_at: DateTime<Utc>,
}
impl RepoConfig {
/// Create a new repo config with a local path
pub fn local(name: impl Into<String>, path: impl Into<String>) -> Self {
Self {
name: name.into(),
org: None,
path: Some(path.into()),
url: None,
maintainers: Vec::new(),
joined_at: Utc::now(),
}
}
/// Create a new repo config with a remote URL
pub fn remote(name: impl Into<String>, url: impl Into<String>) -> Self {
Self {
name: name.into(),
org: None,
path: None,
url: Some(url.into()),
maintainers: Vec::new(),
joined_at: Utc::now(),
}
}
/// Set the organization
pub fn with_org(mut self, org: impl Into<String>) -> Self {
self.org = Some(org.into());
self
}
/// Add a maintainer
pub fn with_maintainer(mut self, email: impl Into<String>) -> Self {
self.maintainers.push(email.into());
self
}
/// Get the fully qualified name (org/name or just name)
pub fn qualified_name(&self) -> String {
match &self.org {
Some(org) => format!("{}/{}", org, self.name),
None => self.name.clone(),
}
}
/// Check if a given email is a maintainer
pub fn is_maintainer(&self, email: &str) -> bool {
self.maintainers.iter().any(|m| m == email)
}
/// Load from a YAML file
pub fn load(path: &Path) -> Result<Self, RealmError> {
let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
path: path.display().to_string(),
source: e,
})?;
let config: Self = serde_yaml::from_str(&content)?;
Ok(config)
}
/// Save to a YAML file
pub fn save(&self, path: &Path) -> Result<(), RealmError> {
let content = serde_yaml::to_string(self)?;
std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
path: path.display().to_string(),
source: e,
})?;
Ok(())
}
}
/// Local repo configuration stored in {repo}/.blue/config.yaml
///
/// This is the per-checkout marker that ties a working copy to a realm.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalRepoConfig {
    /// Realm membership (realm name + realm repo URL)
    pub realm: RealmRef,
    /// This repo's name in the realm
    pub repo: String,
}
impl LocalRepoConfig {
/// Create a new local config
pub fn new(realm_name: impl Into<String>, realm_url: impl Into<String>, repo: impl Into<String>) -> Self {
Self {
realm: RealmRef {
name: realm_name.into(),
url: realm_url.into(),
},
repo: repo.into(),
}
}
/// Load from a YAML file
pub fn load(path: &Path) -> Result<Self, RealmError> {
let content = std::fs::read_to_string(path).map_err(|e| RealmError::ReadFile {
path: path.display().to_string(),
source: e,
})?;
let config: Self = serde_yaml::from_str(&content)?;
Ok(config)
}
/// Save to a YAML file
pub fn save(&self, path: &Path) -> Result<(), RealmError> {
let content = serde_yaml::to_string(self)?;
std::fs::write(path, content).map_err(|e| RealmError::WriteFile {
path: path.display().to_string(),
source: e,
})?;
Ok(())
}
}
/// Reference to a realm
///
/// Stored verbatim in `.blue/config.yaml`; the URL is not validated here.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RealmRef {
    /// Realm name
    pub name: String,
    /// Forgejo URL for the realm repo
    pub url: String,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_repo_config_local() {
        // Builder chain: local path + org + one maintainer.
        let config = RepoConfig::local("aperture", "/Users/ericg/letemcook/aperture")
            .with_org("cultivarium")
            .with_maintainer("eric@example.com");
        assert_eq!(config.name, "aperture");
        // Org prefix is reflected in the qualified name.
        assert_eq!(config.qualified_name(), "cultivarium/aperture");
        assert!(config.is_maintainer("eric@example.com"));
        assert!(!config.is_maintainer("other@example.com"));
    }

    #[test]
    fn test_repo_config_remote() {
        // Remote constructor sets url and leaves path unset.
        let config = RepoConfig::remote("aperture", "git@github.com:cultivarium/aperture.git");
        assert!(config.path.is_none());
        assert!(config.url.is_some());
    }

    #[test]
    fn test_repo_config_yaml_roundtrip() {
        // Serialize then deserialize; key fields must survive the trip.
        let config = RepoConfig::local("aperture", "/path/to/aperture")
            .with_org("cultivarium");
        let yaml = serde_yaml::to_string(&config).unwrap();
        let parsed: RepoConfig = serde_yaml::from_str(&yaml).unwrap();
        assert_eq!(parsed.name, config.name);
        assert_eq!(parsed.org, config.org);
    }

    #[test]
    fn test_local_repo_config() {
        // Realm ref and repo name are stored as given.
        let config = LocalRepoConfig::new(
            "letemcook",
            "https://git.example.com/realms/letemcook.git",
            "aperture",
        );
        assert_eq!(config.realm.name, "letemcook");
        assert_eq!(config.repo, "aperture");
    }
}

File diff suppressed because it is too large Load diff

View file

@ -19,3 +19,4 @@ regex.workspace = true
[dev-dependencies]
blue-core = { workspace = true, features = ["test-helpers"] }
tempfile.workspace = true

View file

@ -14,6 +14,7 @@ pub mod playwright;
pub mod postmortem;
pub mod pr;
pub mod prd;
pub mod realm;
pub mod release;
pub mod reminder;
pub mod rfc;

View file

@ -0,0 +1,339 @@
//! Realm MCP tool handlers
//!
//! Implements RFC 0002: Realm MCP Integration (Phase 1)
//! - realm_status: Get realm overview
//! - realm_check: Validate contracts/bindings
//! - contract_get: Get contract details
use blue_core::daemon::DaemonPaths;
use blue_core::realm::{LocalRepoConfig, RealmService};
use serde_json::{json, Value};
use std::path::Path;
use crate::error::ServerError;
/// Context detected from current working directory
///
/// Bundles the identity read from `.blue/config.yaml` with a
/// `RealmService` rooted at the daemon's realm storage directory.
struct RealmContext {
    // Realm name from `.blue/config.yaml` (`realm.name`)
    realm_name: String,
    // This repo's name within the realm (`repo` key)
    repo_name: String,
    // Service handle for loading realm details from local storage
    service: RealmService,
}
/// Detect realm context from cwd: require a `.blue/config.yaml` marker,
/// parse it, and wire up a `RealmService` rooted at the daemon's storage.
fn detect_context(cwd: Option<&Path>) -> Result<RealmContext, ServerError> {
    let cwd = cwd.ok_or(ServerError::InvalidParams)?;

    // A repo is "in a realm" iff it carries a .blue/config.yaml marker.
    let config_path = cwd.join(".blue").join("config.yaml");
    if !config_path.exists() {
        return Err(ServerError::NotFound(
            "Not in a realm repo. Run 'blue realm admin join <realm>' first.".to_string(),
        ));
    }

    let local_config = LocalRepoConfig::load(&config_path).map_err(|e| {
        ServerError::CommandFailed(format!("Failed to load .blue/config.yaml: {}", e))
    })?;
    let paths = DaemonPaths::new()
        .map_err(|e| ServerError::CommandFailed(format!("Failed to get daemon paths: {}", e)))?;

    Ok(RealmContext {
        realm_name: local_config.realm.name,
        repo_name: local_config.repo,
        service: RealmService::new(paths.realms),
    })
}
/// Handle realm_status - assemble a JSON overview of the current realm:
/// member repos, domains (with contracts and bindings), notifications,
/// and suggested next steps.
pub fn handle_status(cwd: Option<&Path>) -> Result<Value, ServerError> {
    let ctx = detect_context(cwd)?;
    let details = ctx
        .service
        .load_realm_details(&ctx.realm_name)
        .map_err(|e| ServerError::CommandFailed(format!("Failed to load realm: {}", e)))?;

    // Summarize member repos, flagging the one we are running from.
    let repos: Vec<Value> = details
        .repos
        .iter()
        .map(|repo| {
            json!({
                "name": repo.name,
                "path": repo.path,
                "is_current": repo.name == ctx.repo_name
            })
        })
        .collect();

    // Summarize each domain together with its contracts and bindings.
    let domains: Vec<Value> = details
        .domains
        .iter()
        .map(|entry| {
            let contracts: Vec<Value> = entry
                .contracts
                .iter()
                .map(|contract| {
                    json!({
                        "name": contract.name,
                        "version": contract.version,
                        "owner": contract.owner
                    })
                })
                .collect();
            let bindings: Vec<Value> = entry
                .bindings
                .iter()
                .map(|binding| {
                    json!({
                        "repo": binding.repo,
                        "role": format!("{:?}", binding.role),
                        "exports": binding.exports.len(),
                        "imports": binding.imports.len()
                    })
                })
                .collect();
            json!({
                "name": entry.domain.name,
                "members": entry.domain.members,
                "contracts": contracts,
                "bindings": bindings
            })
        })
        .collect();

    // Notifications are fetched via daemon in Phase 4.
    // For now, return empty (sync implementation).
    let notifications: Vec<Value> = Vec::new();

    // Suggest the obvious next action when the realm is still empty.
    let mut next_steps = Vec::new();
    if domains.is_empty() {
        next_steps.push("Create a domain with 'blue realm admin domain'".to_string());
    }

    Ok(json!({
        "status": "success",
        "realm": ctx.realm_name,
        "current_repo": ctx.repo_name,
        "repos": repos,
        "domains": domains,
        "notifications": notifications,
        "next_steps": next_steps
    }))
}
/// Handle realm_check - run validation for the current realm (or an
/// explicitly named one) and report errors, warnings, and next steps.
pub fn handle_check(cwd: Option<&Path>, realm_arg: Option<&str>) -> Result<Value, ServerError> {
    let ctx = detect_context(cwd)?;
    // An explicit realm argument overrides the one from .blue/config.yaml.
    let realm_name = realm_arg.unwrap_or(&ctx.realm_name);

    let result = ctx
        .service
        .check_realm(realm_name)
        .map_err(|e| ServerError::CommandFailed(format!("Failed to check realm: {}", e)))?;

    let errors: Vec<Value> = result
        .errors
        .iter()
        .map(|issue| {
            json!({
                "domain": issue.domain,
                "kind": format!("{:?}", issue.kind),
                "message": issue.message
            })
        })
        .collect();
    let warnings: Vec<Value> = result
        .warnings
        .iter()
        .map(|issue| {
            json!({
                "domain": issue.domain,
                "kind": format!("{:?}", issue.kind),
                "message": issue.message
            })
        })
        .collect();

    // Notifications are fetched via daemon in Phase 4
    let notifications: Vec<Value> = Vec::new();

    // Tailor guidance to the outcome: errors block, warnings advise,
    // a clean run gets the all-clear.
    let clean = result.is_ok();
    let noisy = result.has_warnings();
    let mut next_steps = Vec::new();
    if !clean {
        next_steps.push("Fix errors before proceeding".to_string());
    }
    if noisy {
        next_steps.push("Review warnings - they may indicate issues".to_string());
    }
    if clean && !noisy {
        next_steps.push("All checks passed. Ready to proceed.".to_string());
    }

    Ok(json!({
        "status": if clean { "success" } else { "error" },
        "realm": realm_name,
        "current_repo": ctx.repo_name,
        "valid": clean,
        "errors": errors,
        "warnings": warnings,
        "notifications": notifications,
        "next_steps": next_steps
    }))
}
/// Handle contract_get - look up one contract inside one domain and return
/// its full definition (schema, value, evolution) plus every binding in the
/// domain that exports or imports it, and ownership-aware next steps.
pub fn handle_contract_get(
    cwd: Option<&Path>,
    domain_name: &str,
    contract_name: &str,
) -> Result<Value, ServerError> {
    let ctx = detect_context(cwd)?;
    let details = ctx.service.load_realm_details(&ctx.realm_name).map_err(|e| {
        ServerError::CommandFailed(format!("Failed to load realm: {}", e))
    })?;

    // Find the domain
    let domain = details
        .domains
        .iter()
        .find(|d| d.domain.name == domain_name)
        .ok_or_else(|| {
            ServerError::NotFound(format!("Domain '{}' not found", domain_name))
        })?;

    // Find the contract
    let contract = domain
        .contracts
        .iter()
        .find(|c| c.name == contract_name)
        .ok_or_else(|| {
            ServerError::NotFound(format!(
                "Contract '{}' not found in domain '{}'",
                contract_name, domain_name
            ))
        })?;

    // Collect every binding that references this contract on either side.
    let bindings: Vec<Value> = domain
        .bindings
        .iter()
        .filter(|b| {
            b.exports.iter().any(|e| e.contract == contract_name)
                || b.imports.iter().any(|i| i.contract == contract_name)
        })
        .map(|b| {
            // The previous code collected a Vec<&str> of constant "export"
            // markers purely to test emptiness (clippy: needless_collect);
            // a boolean `any()` expresses the same thing directly.
            let exports_contract = b.exports.iter().any(|e| e.contract == contract_name);
            let imports: Vec<String> = b
                .imports
                .iter()
                .filter(|i| i.contract == contract_name)
                .map(|i| format!("import ({})", i.version))
                .collect();
            json!({
                "repo": b.repo,
                "role": format!("{:?}", b.role),
                "relationship": if exports_contract { "exports" } else { "imports" },
                "version_req": imports.first().cloned()
            })
        })
        .collect();

    // Notifications are fetched via daemon in Phase 4
    let notifications: Vec<Value> = Vec::new();

    // Tell the caller whether they may edit this contract directly.
    let mut next_steps = Vec::new();
    if contract.owner == ctx.repo_name {
        next_steps.push("You own this contract. You can modify it.".to_string());
    } else {
        next_steps.push(format!(
            "This contract is owned by '{}'. Contact them for changes.",
            contract.owner
        ));
    }

    Ok(json!({
        "status": "success",
        "realm": ctx.realm_name,
        "domain": domain_name,
        "contract": {
            "name": contract.name,
            "version": contract.version,
            "owner": contract.owner,
            "compatibility": {
                "backwards": contract.compatibility.backwards,
                "forwards": contract.compatibility.forwards
            },
            "schema": contract.schema,
            "value": contract.value,
            "evolution": contract.evolution
        },
        "bindings": bindings,
        "current_repo": ctx.repo_name,
        "notifications": notifications,
        "next_steps": next_steps
    }))
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Build a temp directory containing a minimal `.blue/config.yaml`.
    fn setup_test_realm() -> (TempDir, std::path::PathBuf) {
        let tmp = TempDir::new().unwrap();
        let root = tmp.path().to_path_buf();
        let blue_dir = root.join(".blue");
        std::fs::create_dir_all(&blue_dir).unwrap();
        let config =
            "realm:\n  name: test-realm\n  url: file:///tmp/test-realm\nrepo: test-repo\n";
        std::fs::write(blue_dir.join("config.yaml"), config).unwrap();
        (tmp, root)
    }

    #[test]
    fn test_detect_context_no_config() {
        // A directory without .blue/config.yaml must be rejected.
        let tmp = TempDir::new().unwrap();
        assert!(detect_context(Some(tmp.path())).is_err());
    }

    #[test]
    fn test_detect_context_with_config() {
        let (_tmp, root) = setup_test_realm();
        // Config parsing works - result depends on whether ~/.blue exists
        // This is an integration-level test; just verify it doesn't panic
        match detect_context(Some(&root)) {
            Ok(ctx) => {
                assert_eq!(ctx.realm_name, "test-realm");
                assert_eq!(ctx.repo_name, "test-repo");
            }
            Err(_) => {
                // Also acceptable if daemon paths don't exist
            }
        }
    }
}

View file

@ -1353,6 +1353,61 @@ impl BlueServer {
},
"required": ["title"]
}
},
// Phase 10: Realm tools (RFC 0002)
{
"name": "realm_status",
"description": "Get realm overview including repos, domains, contracts, and bindings. Returns pending notifications.",
"inputSchema": {
"type": "object",
"properties": {
"cwd": {
"type": "string",
"description": "Current working directory (must be in a realm repo)"
}
},
"required": ["cwd"]
}
},
{
"name": "realm_check",
"description": "Validate realm contracts and bindings. Returns errors and warnings including schema-without-version changes.",
"inputSchema": {
"type": "object",
"properties": {
"cwd": {
"type": "string",
"description": "Current working directory (must be in a realm repo)"
},
"realm": {
"type": "string",
"description": "Specific realm to check (defaults to current repo's realm)"
}
},
"required": ["cwd"]
}
},
{
"name": "contract_get",
"description": "Get contract details including schema, value, version, owner, and bindings.",
"inputSchema": {
"type": "object",
"properties": {
"cwd": {
"type": "string",
"description": "Current working directory (must be in a realm repo)"
},
"domain": {
"type": "string",
"description": "Domain name containing the contract"
},
"contract": {
"type": "string",
"description": "Contract name"
}
},
"required": ["cwd", "domain", "contract"]
}
}
]
}))
@ -1442,6 +1497,10 @@ impl BlueServer {
// Phase 9: Runbook handlers
"blue_runbook_create" => self.handle_runbook_create(&call.arguments),
"blue_runbook_update" => self.handle_runbook_update(&call.arguments),
// Phase 10: Realm tools (RFC 0002)
"realm_status" => self.handle_realm_status(&call.arguments),
"realm_check" => self.handle_realm_check(&call.arguments),
"contract_get" => self.handle_contract_get(&call.arguments),
_ => Err(ServerError::ToolNotFound(call.name)),
}?;
@ -2138,6 +2197,33 @@ impl BlueServer {
let state = self.ensure_state_mut()?;
crate::handlers::runbook::handle_update(state, args)
}
    // Phase 10: Realm handlers (RFC 0002)

    /// MCP tool `realm_status`: delegates to the realm handler using the
    /// server's working directory; tool arguments are currently unused.
    fn handle_realm_status(&mut self, _args: &Option<Value>) -> Result<Value, ServerError> {
        crate::handlers::realm::handle_status(self.cwd.as_deref())
    }
fn handle_realm_check(&mut self, args: &Option<Value>) -> Result<Value, ServerError> {
let realm = args
.as_ref()
.and_then(|a| a.get("realm"))
.and_then(|v| v.as_str());
crate::handlers::realm::handle_check(self.cwd.as_deref(), realm)
}
fn handle_contract_get(&mut self, args: &Option<Value>) -> Result<Value, ServerError> {
let args = args.as_ref().ok_or(ServerError::InvalidParams)?;
let domain = args
.get("domain")
.and_then(|v| v.as_str())
.ok_or(ServerError::InvalidParams)?;
let contract = args
.get("contract")
.and_then(|v| v.as_str())
.ok_or(ServerError::InvalidParams)?;
crate::handlers::realm::handle_contract_get(self.cwd.as_deref(), domain, contract)
}
}
impl Default for BlueServer {

67
docs/cli/README.md Normal file
View file

@ -0,0 +1,67 @@
# Blue CLI
Command-line interface for Blue.
## Installation
```bash
cargo install --path apps/blue-cli
```
Or run directly:
```bash
cargo run --bin blue
```
## Commands
| Command | Description |
|---------|-------------|
| `blue` | Show welcome message |
| `blue status` | Project status |
| `blue realm` | [Cross-repo coordination](realm.md) |
| `blue session` | Work session management |
| `blue daemon` | Background service |
| `blue mcp` | Run as MCP server |
## Realm Commands
See [realm.md](realm.md) for full documentation.
```bash
blue realm status # Show realm info
blue realm check # Validate contracts
blue realm sync # Commit changes
blue realm worktree create # Create RFC worktrees
blue realm pr status # Check PR readiness
blue realm admin init # Create realm
blue realm admin join # Join repo to realm
```
## Session Commands
```bash
blue session start --rfc <name> # Start work session
blue session list # List active sessions
blue session status # Current session info
blue session stop # End session
```
## Daemon Commands
```bash
blue daemon start # Start daemon (foreground)
blue daemon status # Check if running
blue daemon stop # Stop daemon
```
## MCP Server
Run Blue as an MCP server for Claude integration:
```bash
blue mcp
```
Configure in Claude settings to enable Blue tools.

203
docs/cli/realm.md Normal file
View file

@ -0,0 +1,203 @@
# Realm CLI
Cross-repo coordination for shared contracts and dependencies.
## Quick Start
```bash
# 1. Create a realm
blue realm admin init --name mycompany
# 2. Join repos to the realm (run in each repo)
cd ~/projects/api-server
blue realm admin join mycompany
cd ~/projects/web-client
blue realm admin join mycompany
# 3. Create a domain for coordination
blue realm admin domain --realm mycompany --name api-types --repos api-server,web-client
# 4. Define a contract (owned by api-server)
blue realm admin contract --realm mycompany --domain api-types --name user-schema --owner api-server
# 5. Create bindings
blue realm admin binding --realm mycompany --domain api-types --repo api-server --role provider
blue realm admin binding --realm mycompany --domain api-types --repo web-client --role consumer
# 6. Check everything is valid
blue realm check
```
## Concepts
**Realm** - A coordination space for multiple repos. Think of it as a shared namespace.
**Domain** - A specific area of coordination within a realm. Example: "api-types", "s3-access", "config-schemas".
**Contract** - A versioned schema or value that one repo exports and others import. Has a single owner who can modify it.
**Binding** - Declares a repo's relationship to a domain: provider (exports contracts), consumer (imports), or both.
## Commands
### Status & Validation
```bash
# Show realm status - repos, domains, contracts, bindings
blue realm status
# Validate all contracts and bindings
blue realm check
# Check specific realm with strict mode (fail on warnings)
blue realm check --realm mycompany --strict
# Sync pending changes to realm repo
blue realm sync
```
### Administration
```bash
# Initialize a new realm
blue realm admin init --name <name> [--forgejo <url>]
# Join current repo to a realm
blue realm admin join <realm-name> [--repo <name>]
# Create a domain
blue realm admin domain --realm <realm> --name <domain> --repos <repo1,repo2,...>
# Create a contract
blue realm admin contract --realm <realm> --domain <domain> --name <contract> --owner <repo>
# Create a binding
blue realm admin binding --realm <realm> --domain <domain> --repo <repo> --role <provider|consumer|both>
```
### Worktree Management
For working on changes across multiple repos simultaneously:
```bash
# Create worktrees for an RFC (creates branch + worktree in each repo)
blue realm worktree create --rfc rfc-0042-new-api
# List active worktrees
blue realm worktree list
# Remove worktrees when done
blue realm worktree remove --rfc rfc-0042-new-api
```
### PR Workflow
Coordinate PRs across multiple repos:
```bash
# Check PR status across repos
blue realm pr status --rfc rfc-0042-new-api
# Commit uncommitted changes in all worktrees
blue realm pr prepare --rfc rfc-0042-new-api --message "Implement new API"
```
### Sessions
Track active work across repos:
```bash
# Start a work session (run in repo directory)
blue session start --rfc rfc-0042-new-api
# List active sessions
blue session list
# Check session status
blue session status
# End session
blue session stop
```
## Directory Structure
```
~/.blue/
├── daemon.db # Session and notification state
└── realms/
└── mycompany/ # Realm repo (git)
├── realm.yaml # Realm config
├── repos/
│ ├── api-server.yaml
│ └── web-client.yaml
└── domains/
└── api-types/
├── domain.yaml
├── contracts/
│ └── user-schema.yaml
└── bindings/
├── api-server.yaml
└── web-client.yaml
```
Each repo that joins a realm gets:
```
my-repo/
└── .blue/
├── config.yaml # Realm membership
└── session # Active session ID (if any)
```
## Example: S3 Access Coordination
Two repos need to coordinate S3 bucket access - one defines paths, the other consumes them.
```bash
# Setup
blue realm admin init --name letemcook
blue realm admin domain --realm letemcook --name s3-access --repos aperture,fungal
blue realm admin contract --realm letemcook --domain s3-access --name s3-permissions --owner aperture
blue realm admin binding --realm letemcook --domain s3-access --repo aperture --role provider
blue realm admin binding --realm letemcook --domain s3-access --repo fungal --role consumer
# Aperture exports paths it uses
# (edit ~/.blue/realms/letemcook/domains/s3-access/contracts/s3-permissions.yaml)
# Fungal imports those paths for IAM policies
# (its binding declares the import)
# Validate
blue realm check
```
## Daemon
The daemon tracks sessions and notifications. Start it before using session commands:
```bash
# Start daemon (foreground)
blue daemon start
# Check if running
blue daemon status
```
The daemon runs on `localhost:7865` and stores state in `~/.blue/daemon.db`.
## CI Integration
Add to your CI pipeline:
```yaml
- name: Check realm contracts
run: blue realm check --strict
```
This validates:
- All contracts have valid semver versions
- All bindings reference existing contracts
- Import version requirements are satisfied
- No broken imports

View file

@ -0,0 +1,372 @@
# Dialogue: Realm MCP Integration Design
**RFC**: [0002-realm-mcp-integration](../rfcs/0002-realm-mcp-integration.md)
**Goal**: Reach 95% alignment on open design questions
**Format**: 12 experts, structured rounds
---
## Open Questions
1. **Tool granularity** - One `realm` tool with subcommands, or separate tools?
2. **Notification delivery** - Poll on each tool call, or separate subscription?
3. **Multi-realm** - How to handle repos in multiple realms?
---
## Expert Panel
| Expert | Domain | Perspective |
|--------|--------|-------------|
| **Ada** | API Design | Clean interfaces, discoverability |
| **Ben** | Developer Experience | Friction, learning curve |
| **Carmen** | Systems Architecture | Scalability, performance |
| **David** | MCP Protocol | Tool conventions, client compatibility |
| **Elena** | Claude Integration | LLM tool use patterns |
| **Felix** | Distributed Systems | Consistency, coordination |
| **Grace** | Security | Trust boundaries, access control |
| **Hassan** | Product | User workflows, value delivery |
| **Iris** | Simplicity | Minimalism, YAGNI |
| **James** | Observability | Debugging, transparency |
| **Kim** | Testing | Testability, reliability |
| **Luna** | Documentation | Learnability, examples |
---
## Round 1: Initial Positions
### Question 1: Tool Granularity
**Ada (API Design)**: Separate tools. Each tool has a clear contract. `realm_status` returns status, `realm_check` returns validation results. Easier to document, easier to version independently.
**Ben (DX)**: Separate tools, but not too many. 5-7 tools max in the "realm" namespace. Too many tools overwhelm. Group by workflow: status, validation, session, worktree.
**David (MCP Protocol)**: MCP tools should be atomic operations. One tool = one action. Subcommand patterns work poorly because the LLM has to understand nested schemas. Separate tools with clear names.
**Elena (Claude Integration)**: Claude performs better with focused tools. A tool that does one thing well gets used correctly. A multi-purpose tool with modes leads to parameter confusion.
**Iris (Simplicity)**: Start with 3 tools: `realm_status`, `realm_check`, `realm_action`. The action tool can handle mutations. Expand only when pain is proven.
**Luna (Documentation)**: Separate tools are easier to document with examples. Each tool gets its own "when to use this" section.
**Alignment**: 85% toward separate tools, debate on how many.
### Question 2: Notification Delivery
**Carmen (Systems)**: Polling is simpler and more reliable. MCP doesn't have a push channel. Each tool call can check for pending notifications and include them in the response.
**Felix (Distributed)**: Polling with piggybacking. Don't make a separate notification tool - just include notifications in every response when relevant. The daemon tracks what's been delivered.
**David (MCP Protocol)**: MCP has no subscription model. Polling is the only option. But we can be smart: return notifications with any tool response, mark as delivered, client sees them naturally.
**Hassan (Product)**: Users don't want to manually check notifications. Piggybacking is right - if Claude calls any realm tool and there are notifications, surface them. Claude can then decide to act.
**James (Observability)**: Need a way to explicitly list/ack notifications too. Piggybacking is good for discovery, but sometimes you want to see "what happened while I was away."
**Grace (Security)**: Notifications should only show changes the current session is authorized to see. Scope to domains the repo participates in.
**Alignment**: 90% toward poll-with-piggyback, explicit list/ack as supplement.
### Question 3: Multi-Realm
**Iris (Simplicity)**: Defer. MVP is one realm per repo. Multi-realm is complexity we don't need yet. YAGNI.
**Ada (API Design)**: If we defer, design the API to not preclude it. All tools should accept optional `realm` parameter. Default to detected realm from cwd.
**Ben (DX)**: Agree with Iris. I've never seen a repo that needs to be in multiple realms. Solve real problems first.
**Carmen (Systems)**: One repo, one realm is a reasonable constraint. It simplifies the daemon state model significantly.
**Felix (Distributed)**: Multi-realm creates coordination nightmares. Which realm's contract wins? Keep it simple: one repo belongs to one realm.
**Grace (Security)**: Multi-realm means multi-trust-boundary. That's a security model I'd want to think very carefully about. Defer.
**Kim (Testing)**: Single realm is much easier to test. The state space explodes with multi-realm.
**Hassan (Product)**: What's the user story for multi-realm? I can't think of one. A repo coordinates with its team. That's one realm.
**Alignment**: 95% toward single-realm constraint for MVP.
---
## Round 2: Convergence
### Question 1: How Many Tools?
**Elena**: Let me propose a concrete list based on user workflows:
**Status/Read (3 tools)**:
- `realm_status` - Overview of realm state
- `realm_check` - Validation for CI
- `contract_get` - Get specific contract details
**Session (2 tools)**:
- `session_start` - Begin work session
- `session_stop` - End session (status is included in other responses)
**Workflow (2 tools)**:
- `worktree_create` - Set up RFC worktrees
- `pr_status` - Check PR readiness (prepare is a CLI action, not MCP)
**Notifications (1 tool)**:
- `notifications_list` - Explicit check (ack happens implicitly when acted upon)
**Total: 8 tools**
**Ben**: That's reasonable. Each maps to a clear user intent.
**Ada**: I'd merge `session_start` and `session_stop` into `session` with an action parameter. "Start" and "stop" are two sides of one concept.
**Elena**: Fair, but Claude handles "start a session" better than "call session with action=start". The verb in the tool name guides usage.
**David**: Agree with Elena. MCP tools work best when the name is the action. `session_start` is clearer than `session(action="start")`.
**Iris**: 8 tools feels like a lot. Can we cut?
**Hassan**: Which would you cut? Each serves a distinct workflow.
**Iris**: `contract_get` could be part of `realm_status` with a filter. `notifications_list` could be piggybacked only.
**James**: I want `notifications_list` as explicit tool. "Show me what changed" is a real user intent.
**Luna**: 8 tools is fine if they're well-documented. The CLI has more commands than that.
**Alignment on Q1**: 90% - 8 tools as proposed, with room to consolidate if usage shows overlap.
### Question 2: Notification Details
**Felix**: Proposal for piggybacking:
1. Every tool response includes `notifications: []` field
2. Daemon marks notifications as "delivered" when returned
3. `notifications_list` shows all (including delivered) with filter options
4. No explicit ack needed - acting on a notification is implicit ack
**Carmen**: What triggers a notification? Contract version bump?
**Felix**: Three triggers:
- Contract updated (version change)
- Contract schema changed (even same version - dangerous)
- Binding added/removed in shared domain
**Grace**: Notifications scoped to domains the current repo participates in. If aperture and fungal share s3-access domain, aperture sees fungal's changes to contracts in that domain only.
**Kim**: How do we test piggybacking? Every tool needs to include the notification check.
**Ada**: Extract to middleware. Every MCP handler calls `check_notifications()` and merges into response.
**Alignment on Q2**: 95% - Piggyback with explicit list, middleware pattern, three trigger types.
### Question 3: Single Realm Confirmed
**All**: Consensus. One repo, one realm. The `realm` parameter is optional (defaults to cwd detection) but exists for explicit override in edge cases.
**Ada**: Document clearly: "A repo belongs to one realm. To coordinate across organizational boundaries, create a shared realm."
**Alignment on Q3**: 95% - Single realm constraint, documented clearly.
---
## Round 3: Final Positions
### Resolved Design
**Tool Inventory (8 tools)**:
| Tool | Purpose | Notifications |
|------|---------|---------------|
| `realm_status` | Realm overview | Yes |
| `realm_check` | Validation | Yes |
| `contract_get` | Contract details | Yes |
| `session_start` | Begin session | Yes |
| `session_stop` | End session | No (final) |
| `worktree_create` | Create RFC worktrees | Yes |
| `pr_status` | PR readiness | Yes |
| `notifications_list` | Explicit notification check | N/A |
**Notification Model**:
- Piggybacked on tool responses
- Three triggers: version change, schema change, binding change
- Scoped to shared domains
- Middleware pattern for implementation
- Explicit list for "catch up" workflow
**Realm Constraint**:
- One repo belongs to one realm
- Optional `realm` parameter for explicit override
- Detected from `.blue/config.yaml` by default
---
## Round 4: Resolving the Deferred 5%
### Question 4: Notification Persistence
**Carmen (Systems)**: Notifications need a lifecycle. Options:
- A) Session-scoped: live until session ends
- B) Time-based: live for N hours
- C) Ack-based: live until explicitly acknowledged
- D) Hybrid: session OR time, whichever comes first
**Felix (Distributed)**: Session-scoped is problematic. What if I start a session, see a notification, don't act on it, end session, start new session - is it gone? That's data loss.
**James (Observability)**: Notifications are events. Events should be durable. I want to see "what changed in the last week" even if I wasn't in a session.
**Hassan (Product)**: User story: "I was on vacation for a week. I come back, start a session. What changed?" Time-based with reasonable window.
**Grace (Security)**: Notifications contain information about what changed. Long retention = larger attack surface if daemon db is compromised. Keep it short.
**Iris (Simplicity)**: 7 days, no ack needed. Old notifications auto-expire. Simple to implement, simple to reason about.
**Ben (DX)**: What about "I've seen this, stop showing me"? Piggyback means I see the same notification on every tool call until it expires.
**Ada (API Design)**: Two states: `pending` and `seen`. Piggyback only returns `pending`. First piggyback delivery marks as `seen`. `notifications_list` can show both with filter.
**Felix**: So the lifecycle is:
1. Created (pending) - triggered by contract change
2. Seen - first piggybacked delivery
3. Expired - 7 days after creation
**Kim (Testing)**: That's testable. Clear state machine.
**Elena (Claude)**: Claude sees notification once via piggyback, can ask for history via `notifications_list`. Clean.
**Luna (Docs)**: Easy to document: "Notifications appear once automatically, then move to history. History retained 7 days."
**Alignment on Q4**: 95%
- **Lifecycle**: pending → seen → expired
- **Retention**: 7 days from creation
- **Piggyback**: only pending notifications
- **List**: shows all with state filter
---
### Question 5: Schema Change Detection
**Carmen (Systems)**: JSON Schema diffing is hard. Semantic equivalence is undecidable in general. Options:
- A) Hash comparison (fast, false positives on formatting)
- B) Normalized hash (canonicalize then hash)
- C) Structural diff (expensive, accurate)
- D) Don't detect schema changes, only version changes
**Ada (API Design)**: What's the user need? "Contract schema changed" means "you might need to update your code." Version bump should signal that.
**David (MCP)**: If we require version bump for schema changes, we don't need schema diffing. The version IS the signal.
**Iris (Simplicity)**: I like D. Schema changes without version bump is a bug. Don't build tooling for buggy workflows.
**Grace (Security)**: Counterpoint: a malicious or careless actor changes the schema without bumping the version. Consumer code breaks silently. Detection is a safety net.
**Felix (Distributed)**: Schema hash as secondary check. If schema hash changes but version doesn't, that's a warning, not a notification. Different severity.
**Ben (DX)**: So we have:
- Version change → notification (normal)
- Schema change without version change → warning in `realm_check` (smells bad)
**Kim (Testing)**: Normalized hash is deterministic. Canonicalize JSON (sorted keys, no whitespace), SHA256. Same schema always produces same hash.
**Carmen**: Canonicalization is well-defined for JSON. Use RFC 8785 (JSON Canonicalization Scheme) or similar.
**James (Observability)**: Store schema hash in contract metadata. On load, compute hash, compare. Mismatch = warning. No complex diffing needed.
**Hassan (Product)**: I like the split: version changes are notifications (expected), schema-without-version is a check warning (unexpected, possibly buggy).
**Elena (Claude)**: Clear for Claude too. Notifications are "things happened." Warnings are "something might be wrong."
**Alignment on Q5**: 95%
- **Version change**: notification (normal workflow)
- **Schema change without version**: warning in `realm_check` (smells bad)
- **Detection method**: canonical JSON hash (RFC 8785 style)
- **Storage**: hash stored in contract, computed on load, compared
---
### Question 6: Worktree Tool Scope
**Hassan (Product)**: User stories:
1. "I'm starting RFC work, set up worktrees for all repos in my realm"
2. "I only need to touch aperture and fungal for this RFC, not the others"
3. "I'm in aperture, create a worktree just for this repo"
**Ben (DX)**: Default should be "smart" - create worktrees for repos in domains I participate in, not all repos in realm.
**Ada (API Design)**: Parameters:
- `rfc` (required): branch name
- `repos` (optional): specific list, default = domain peers
**Felix (Distributed)**: "Domain peers" = repos that share at least one domain with current repo. If aperture and fungal share s3-access, they're peers.
**Iris (Simplicity)**: What if I just want current repo? That's the simplest case.
**Luna (Docs)**: Three modes:
1. `worktree_create(rfc="x")` → domain peers (smart default)
2. `worktree_create(rfc="x", repos=["a","b"])` → specific list
3. `worktree_create(rfc="x", repos=["self"])` → just current repo
**Kim (Testing)**: "self" is a magic value. I'd prefer explicit: `repos=["aperture"]` where aperture is current repo.
**Elena (Claude)**: Claude can figure out current repo name from context. Magic values are confusing for LLMs.
**Ada**: Revised:
- `repos` omitted → domain peers
- `repos=[]` (empty) → error, must specify something
- `repos=["aperture"]` → just aperture
**Ben**: What if repo has no domain peers? Solo repo in realm.
**Felix**: Then domain peers = empty = just self. Natural fallback.
**Carmen**: Edge case: repo in multiple domains with different peer sets. Union of all peers?
**Grace**: Union. If you share any domain, you might need to coordinate.
**James (Observability)**: Log which repos were selected and why. "Creating worktrees for domain peers: aperture, fungal (shared domain: s3-access)"
**Alignment on Q6**: 95%
- **Default**: domain peers (repos sharing at least one domain)
- **Explicit**: `repos` parameter for specific list
- **Solo repo**: defaults to just self
- **Multiple domains**: union of all peers
- **Logging**: explain selection reasoning
---
## Remaining 2%: Truly Deferred
1. **Notification aggregation** - If contract changes 5 times in an hour, 5 notifications or 1? (Decide during implementation based on UX testing)
---
## Final Alignment: 98%
**Consensus reached on**:
### Core Design (Rounds 1-3)
- 8 focused tools mapping to user workflows
- Piggyback notifications with explicit list fallback
- Single realm constraint with documented rationale
### Notification Persistence (Round 4)
- Lifecycle: pending → seen → expired
- Retention: 7 days from creation
- Piggyback delivers pending only, marks as seen
- List tool shows all with state filter
### Schema Change Detection (Round 4, Question 5)
- Version changes → notifications (normal workflow)
- Schema-without-version → `realm_check` warning (smells bad)
- Detection via canonical JSON hash (RFC 8785 style)
### Worktree Scope (Round 4, Question 6)
- Default: domain peers (repos sharing domains with current repo)
- Explicit: `repos` parameter overrides default
- Solo repos default to self
- Multiple domains: union of all peers
- Log selection reasoning for transparency
### Truly Deferred (2%)
- Notification aggregation (rapid changes: batch or individual?)
**Panel Sign-off**:
- Ada ✓, Ben ✓, Carmen ✓, David ✓, Elena ✓, Felix ✓
- Grace ✓, Hassan ✓, Iris ✓, James ✓, Kim ✓, Luna ✓

View file

@ -2,8 +2,10 @@
| | |
|---|---|
| **Status** | Draft |
| **Status** | Accepted |
| **Created** | 2026-01-24 |
| **Implemented** | 2026-01-24 |
| **CLI Docs** | [docs/cli/realm.md](../cli/realm.md) |
| **Source** | [Spike: cross-repo-coordination](../spikes/cross-repo-coordination.md) |
| **Dialogue** | [cross-repo-realms.dialogue.md](../dialogues/cross-repo-realms.dialogue.md) |
| **Refinement** | [cross-repo-realms-refinement.dialogue.md](../dialogues/cross-repo-realms-refinement.dialogue.md) |
@ -42,8 +44,8 @@ When aperture adds a new S3 path, fungal's IAM policy must update. Currently:
### Hierarchy
```
Index (~/.blue/index.yaml)
└── Realm (git repo)
Daemon (per-machine)
└── Realm (git repo in Forgejo)
└── Domain (coordination context)
├── Repo A (participant)
└── Repo B (participant)
@ -51,10 +53,10 @@ Index (~/.blue/index.yaml)
| Level | Purpose | Storage |
|-------|---------|---------|
| **Index** | List of realms user participates in | `~/.blue/index.yaml` |
| **Realm** | Groups related coordination domains | Git repository |
| **Daemon** | Manages realms, sessions, notifications | `~/.blue/daemon.db` |
| **Realm** | Groups related coordination domains | Git repo in Forgejo, cloned to `~/.blue/realms/` |
| **Domain** | Coordination context between repos | Directory in realm repo |
| **Repo** | Actual code repository (can participate in multiple domains) | `.blue/` directory |
| **Repo** | Actual code repository (can participate in multiple domains) | `.blue/config.yaml` declares membership |
**Key insight:** A domain is the *relationship* (edge), not the *thing* (node). Repos are nodes; domains are edges connecting them.
@ -91,17 +93,56 @@ Note: `fungal` participates in both domains with different roles.
## Architecture
### Daemon
Blue runs as a per-machine daemon that manages realm state, git operations, and session coordination.
```
┌─────────────────────────────────────────────────────────────┐
│ Blue Daemon │
│ │
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
│ │ HTTP Server │ │ Git Manager │ │ Session Mgr │ │
│ │ localhost: │ │ (git2 crate) │ │ │ │
│ │ 7865 │ │ │ │ │ │
│ └──────────────┘ └──────────────┘ └──────────────┘ │
│ │ │ │ │
│ └─────────────────┼─────────────────┘ │
│ │ │
│ ┌──────┴──────┐ │
│ │ daemon.db │ │
│ │ (SQLite) │ │
│ └─────────────┘ │
└─────────────────────────────────────────────────────────────┘
▲ ▲
│ HTTP │ HTTP
│ │
┌─────┴─────┐ ┌──────┴──────┐
│ blue CLI │ │ Blue GUI │
│ │ │ (future) │
└───────────┘ └─────────────┘
```
**Key properties:**
- Runs as system service (launchd/systemd)
- Auto-starts on first CLI invocation
- HTTP API on `localhost:7865` (GUI-friendly)
- All git operations via `git2` crate (no subprocess)
- Single-user assumption (multi-user is future work)
### Directory Structure
```
~/.blue/
├── index.yaml # Realms this user participates in
├── daemon.db # Daemon state (realms, sessions, notifications)
├── realms/ # Managed realm repo clones
│ └── {realm-name}/ # Cloned from Forgejo
└── credentials.yaml # Optional, prefer git credentials
$XDG_RUNTIME_DIR/blue/
└── sessions.db # Session coordination (SQLite)
/var/run/blue/ # Or $XDG_RUNTIME_DIR/blue/ on Linux
└── blue.pid # Daemon PID file
realm-{name}/ # Git repository
realm-{name}/ # Git repository (in Forgejo)
├── realm.yaml # Metadata, governance, trust
├── repos/
│ └── {repo}.yaml # Registered repos
@ -115,7 +156,7 @@ realm-{name}/ # Git repository
└── {repo}.yaml # Export/import declarations
{repo}/.blue/
├── config.yaml # Realm membership, domains
├── config.yaml # Realm membership (name + Forgejo URL)
└── cache.db # SQLite cache for exports, contracts
```
@ -250,7 +291,7 @@ role: consumer
imports:
- contract: s3-permissions
version: ">=1.0.0 <2.0.0" # Semver range
version: ">=1.0.0, <2.0.0" # Semver range
binding: cdk/training_tools_access_stack.py
status: current
resolved_version: "1.4.0"
@ -263,62 +304,69 @@ imports:
# aperture/.blue/config.yaml
realm:
name: letemcook
path: ../realm-letemcook
url: https://git.example.com/realms/letemcook.git
repo: aperture
domains:
- name: s3-access
role: provider
contracts:
- s3-permissions
# Domains and contracts are defined in the realm repo (single source of truth).
# This config just declares membership. The daemon resolves the rest.
```
The realm repo is authoritative for what repos exist and their roles. Local config only declares "I belong to realm X at URL Y." The daemon clones and manages the realm repo automatically.
---
## Coordination Model
Blue uses a **hybrid coordination model**:
1. **Real-time hints (SQLite IPC):** Fast, best-effort session awareness
1. **Real-time awareness (Daemon):** Fast session tracking and notifications
2. **Durable changes (Git PRs):** Source of truth, auditable, PR-based
```
┌──────────────────────────────────────────────────────────────┐
│ Coordination Layers │
├──────────────────────────────────────────────────────────────┤
│ Real-time Hints (SQLite)
│ ┌─────────────┐ sessions.db ┌─────────────┐ │
│ │ Session A │◄───────────────────►│ Session B │ │
│ │ (aperture) │ notifications │ (fungal) │ │
│ └─────────────┘ └─────────────┘ │
│ Real-time Awareness (Daemon)
│ ┌─────────────┐ ┌─────────────┐ │
│ │ CLI/GUI A │◄───────────────────►│ CLI/GUI B │ │
│ │ (aperture) │ Blue Daemon │ (fungal) │ │
│ └─────────────┘ localhost:7865 └─────────────┘ │
│ │
Best-effort, sub-second latency, auto-cleanup
Instant notifications, session tracking, auto-cleanup
├──────────────────────────────────────────────────────────────┤
│ Durable Changes (Git)
│ Durable Changes (Git via Forgejo)
│ ┌─────────────┐ ┌─────────────┐ │
│ │ Repo │────sync branch─────►│ Realm Repo │ │
│ │ │◄───PR review────────│ │ │
│ │ │◄───PR review────────│ (Forgejo) │ │
│ └─────────────┘ └─────────────┘ │
│ │
│ Source of truth, PR-based, auditable │
└──────────────────────────────────────────────────────────────┘
```
### Session Coordination (SQLite)
### Session Coordination (Daemon-Managed)
Sessions register in a shared SQLite database for real-time awareness:
The daemon manages sessions and notifications in `~/.blue/daemon.db`:
```sql
-- $XDG_RUNTIME_DIR/blue/sessions.db
-- ~/.blue/daemon.db
CREATE TABLE realms (
name TEXT PRIMARY KEY,
forgejo_url TEXT NOT NULL,
local_path TEXT NOT NULL,
last_sync TEXT,
status TEXT DEFAULT 'active'
);
CREATE TABLE sessions (
id TEXT PRIMARY KEY,
repo TEXT NOT NULL,
realm TEXT NOT NULL,
pid INTEGER,
client_id TEXT, -- CLI instance or GUI window
started_at TEXT,
last_heartbeat TEXT,
last_activity TEXT,
active_rfc TEXT,
active_domains JSON DEFAULT '[]',
exports_modified JSON DEFAULT '[]',
@ -338,10 +386,29 @@ CREATE TABLE notifications (
);
```
**Heartbeat protocol:**
- Sessions update `last_heartbeat` every 10 seconds
- Stale sessions (>30s) are automatically cleaned up
- Crash recovery on startup cleans orphaned sessions
**Session lifecycle:**
- CLI registers session on command start, deregisters on exit
- Daemon tracks activity via API calls (no heartbeat polling needed)
- Orphaned sessions cleaned on daemon restart
- GUI clients maintain persistent sessions
### Daemon API
The daemon exposes an HTTP API on `localhost:7865`:
| Endpoint | Method | Purpose |
|----------|--------|---------|
| `/health` | GET | Daemon health check |
| `/realms` | GET | List tracked realms |
| `/realms/{name}` | GET | Realm details, domains, repos |
| `/realms/{name}/sync` | POST | Trigger sync for realm |
| `/sessions` | GET | List active sessions |
| `/sessions` | POST | Register new session |
| `/sessions/{id}` | DELETE | Deregister session |
| `/notifications` | GET | List pending notifications |
| `/notifications/{id}/ack` | POST | Acknowledge notification |
**Auto-start:** If CLI calls daemon and it's not running, CLI spawns daemon as background process before proceeding.
### Sync Protocol (Git PRs)
@ -502,7 +569,7 @@ blue realm admin cache stats # Show cache hit rates
## CI/CD Integration
### GitHub Actions Example
### Forgejo Actions Example
```yaml
name: Realm Contract Check
@ -532,10 +599,12 @@ jobs:
BLUE_REALM_TOKEN: ${{ secrets.REALM_TOKEN }}
- name: Check compatibility
if: github.event_name == 'pull_request'
if: gitea.event_name == 'pull_request'
run: blue realm check --mode=compatibility
```
**Note:** Forgejo Actions uses `gitea.*` context variables. The workflow syntax is otherwise compatible with GitHub Actions.
### Validation Hooks
Contracts can define validation scripts:
@ -561,22 +630,23 @@ Blue uses a layered credential approach:
```
Priority order (first found wins):
1. Environment: BLUE_REALM_TOKEN, BLUE_GITHUB_TOKEN
1. Environment: BLUE_REALM_TOKEN, BLUE_FORGEJO_TOKEN
2. Git credential helper: git credential fill
3. Keychain: macOS Keychain, Linux secret-service
4. Config file: ~/.blue/credentials.yaml (discouraged)
```
**For CI, GitHub App auth is supported:**
**For CI with Forgejo:**
```yaml
env:
BLUE_GITHUB_APP_ID: 12345
BLUE_GITHUB_APP_PRIVATE_KEY: ${{ secrets.APP_KEY }}
BLUE_GITHUB_APP_INSTALLATION_ID: 67890
BLUE_FORGEJO_URL: https://git.example.com
BLUE_FORGEJO_TOKEN: ${{ secrets.FORGEJO_TOKEN }}
```
**Default behavior:** Uses existing git credentials. No additional setup for basic usage.
**Note:** GitHub/GitLab support is future work. MVP targets Forgejo only.
---
## Conflict Resolution
@ -628,17 +698,20 @@ Detection uses topological sort on the domain dependency graph.
### Initial Setup
```bash
# 1. Create realm (one-time)
$ blue realm admin init --name letemcook
# 1. Create realm (one-time, creates repo in Forgejo)
$ blue realm admin init --name letemcook --forgejo https://git.example.com
Starting daemon...
✓ Daemon running on localhost:7865
✓ Created realm repo in Forgejo: git.example.com/realms/letemcook
✓ Cloned to ~/.blue/realms/letemcook/
✓ Created realm.yaml
✓ Initialized git repository
# 2. Register aperture in realm
$ cd aperture
$ blue realm admin join ../realm-letemcook
✓ Created repos/aperture.yaml
Auto-detected exports: s3-permissions
Updated .blue/config.yaml
$ blue realm admin join letemcook
✓ Created repos/aperture.yaml in realm
Created .blue/config.yaml locally
Pushed to Forgejo
# 3. Create the s3-access domain
$ blue realm admin domain create s3-access \
@ -648,13 +721,15 @@ $ blue realm admin domain create s3-access \
✓ Created domains/s3-access/contracts/s3-permissions.yaml
✓ Created domains/s3-access/bindings/aperture.yaml (provider)
✓ Created domains/s3-access/bindings/fungal-image-analysis.yaml (consumer)
✓ Pushed to Forgejo
# 4. Register fungal in realm
$ cd ../fungal-image-analysis
$ blue realm admin join ../realm-letemcook
✓ Created repos/fungal-image-analysis.yaml
$ blue realm admin join letemcook
✓ Created repos/fungal-image-analysis.yaml in realm
✓ Detected import: s3-permissions in domain s3-access
✓ Updated .blue/config.yaml
✓ Created .blue/config.yaml locally
✓ Pushed to Forgejo
```
### Daily Development
@ -751,16 +826,24 @@ Merge order: aperture#45 → fungal#23
| Phase | Scope |
|-------|-------|
| 0 | Data model + SQLite schemas |
| 1 | `blue realm admin init/join` |
| 2 | `blue realm status` with realm info |
| 3 | `blue realm sync` with PR workflow |
| 4 | `blue realm check` for CI |
| 5 | Session coordination (SQLite IPC) |
| 6 | `blue realm worktree` |
| 7 | `blue realm pr` |
| 8 | Caching layer |
| 9 | Polish + docs |
| 0 | Daemon infrastructure (HTTP server, auto-start, git2 integration) |
| 1 | Data model + SQLite schemas |
| 2 | `blue realm admin init/join` (creates realm in Forgejo, manages clones) |
| 3 | `blue realm status` with realm info |
| 4 | `blue realm sync` with PR workflow |
| 5 | `blue realm check` for CI |
| 6 | Session coordination (daemon-managed) |
| 7 | `blue realm worktree` |
| 8 | `blue realm pr` |
| 9 | Caching layer |
| 10 | Polish + docs |
### Dependencies
- **git2** crate for all git operations
- **axum** or **actix-web** for daemon HTTP server
- **rusqlite** for daemon.db and cache.db
- **reqwest** for Forgejo API calls
### Phase 0: Data Model
@ -846,10 +929,19 @@ pub struct ImportBinding {
- [ ] `blue realm check --mode=compatibility` detects breaking changes
- [ ] Validation hooks run with correct exit codes
### Daemon
- [ ] Daemon starts on first CLI invocation if not running
- [ ] Daemon responds on `localhost:7865`
- [ ] Daemon creates `~/.blue/daemon.db` on first start
- [ ] Daemon clones realm repos to `~/.blue/realms/`
- [ ] Daemon PID file created in `/var/run/blue/` or `$XDG_RUNTIME_DIR/blue/`
- [ ] Daemon graceful shutdown cleans up resources
- [ ] CLI commands fail gracefully if the daemon is unreachable
### Session Coordination
- [ ] SQLite sessions.db created on Blue start
- [ ] Session registered with heartbeat
- [ ] Stale sessions cleaned up after 30s
- [ ] Session registered on CLI command start
- [ ] Session deregistered on CLI command exit
- [ ] Orphaned sessions cleaned on daemon restart
- [ ] Contract changes create notifications
- [ ] Notifications visible in `blue realm status`
@ -874,12 +966,16 @@ pub struct ImportBinding {
## Future Work
1. **Signature verification** - Repos sign their exports
2. **Multiple realms** - One repo participates in multiple realms
3. **Cross-realm imports** - Import from domain in different realm
4. **Public registry** - Discover realms and contracts
5. **Infrastructure verification** - Check actual AWS state matches contracts
6. **Domain-level governance** - Override realm governance per domain
1. **Desktop GUI** - Native app for realm management and notifications
2. **Sophisticated contract detection** - Parse Python/TypeScript/etc. to auto-detect exports (tree-sitter)
3. **Signature verification** - Repos sign their exports
4. **Multiple realms** - One repo participates in multiple realms
5. **Cross-realm imports** - Import from domain in different realm
6. **Public registry** - Discover realms and contracts
7. **Infrastructure verification** - Check actual AWS state matches contracts
8. **Domain-level governance** - Override realm governance per domain
9. **GitHub/GitLab support** - Alternative to Forgejo for external users
10. **Multi-user daemon** - Support multiple users on shared machines
---

View file

@ -0,0 +1,247 @@
# RFC 0002: Realm MCP Integration
| | |
|---|---|
| **Status** | Accepted |
| **Created** | 2026-01-24 |
| **Depends On** | [RFC 0001: Cross-Repo Coordination with Realms](0001-cross-repo-realms.md) |
| **Dialogue** | [realm-mcp-design.dialogue.md](../dialogues/realm-mcp-design.dialogue.md) |
| **Alignment** | 98% (12 experts, 6 rounds) |
---
## Problem
RFC 0001 implemented realm coordination via CLI commands. Claude sessions currently have to shell out to use them:
```bash
blue realm status
blue realm check
```
This works but has limitations:
1. No structured data - Claude parses text output
2. No push notifications - Claude must poll for changes
3. No context awareness - tools don't know current session state
## Goals
1. **Native MCP tools** - Claude calls realm functions directly with structured input/output
2. **Session integration** - Tools aware of current repo, realm, and active RFC
3. **Notifications** - Claude receives contract change notifications during sessions
4. **Guided workflow** - Tools suggest next steps based on realm state
## Non-Goals
- Replacing CLI commands (MCP complements, doesn't replace)
- Cross-machine coordination (daemon is local only for MVP)
- Automatic code generation from contracts (future scope)
- Multi-realm support (one repo belongs to one realm)
---
## Proposal
### MCP Tools (8 total)
#### Status & Read (3 tools)
| Tool | Description | Returns |
|------|-------------|---------|
| `realm_status` | Realm overview | Repos, domains, contracts, bindings, sessions |
| `realm_check` | Validation for CI | Errors, warnings (including schema changes without a version bump) |
| `contract_get` | Contract details | Full contract with schema, value, version history |
#### Session (2 tools)
| Tool | Description | Returns |
|------|-------------|---------|
| `session_start` | Begin work session | Session ID, realm context |
| `session_stop` | End session | Summary of changes made |
#### Workflow (2 tools)
| Tool | Description | Returns |
|------|-------------|---------|
| `worktree_create` | Create RFC worktrees | Paths, branches created, repos selected |
| `pr_status` | PR readiness across repos | Uncommitted changes, commits ahead |
#### Notifications (1 tool)
| Tool | Description | Returns |
|------|-------------|---------|
| `notifications_list` | Get notification history | All notifications with state filter |
### Context Awareness
MCP tools automatically detect context from:
1. **Current directory** - Read `.blue/config.yaml` for realm/repo
2. **Active session** - Read `.blue/session` for session ID
3. **Daemon state** - Query for notifications, other sessions
Optional `realm` parameter allows explicit override for edge cases.
### Notification Model
**Delivery: Piggyback with explicit list**
Every tool response includes pending notifications. This provides natural discovery without separate polling.
```json
{
"result": { ... },
"notifications": [
{
"id": "notif-123",
"type": "contract_updated",
"realm": "aperture",
"domain": "s3-access",
"contract": "s3-permissions",
"from_repo": "fungal",
"old_version": "1.0.0",
"new_version": "1.1.0",
"state": "pending",
"created_at": "2026-01-24T12:00:00Z"
}
],
"next_steps": ["Review contract changes from fungal"]
}
```
**Lifecycle: pending → seen → expired**
1. **Pending** - Created when trigger fires
2. **Seen** - Marked on first piggyback delivery
3. **Expired** - 7 days after creation (auto-cleanup)
Piggyback delivery includes only `pending` notifications. `notifications_list` shows all states, with filters.
**Triggers (3 types)**
| Trigger | Severity | Destination |
|---------|----------|-------------|
| Contract version change | Notification | Piggyback + list |
| Contract schema change (same version) | Warning | `realm_check` only |
| Binding added/removed in shared domain | Notification | Piggyback + list |
**Scope**: Notifications only for domains the current repo participates in.
### Schema Change Detection
Detect schema changes via canonical JSON hash (RFC 8785 style):
1. Store schema hash in contract metadata on save
2. Compute hash on load, compare to stored
3. Mismatch with same version = warning in `realm_check`
This catches accidental/malicious schema changes without version bumps.
### Worktree Scope
`worktree_create` parameters:
- `rfc` (required) - Branch name for worktrees
- `repos` (optional) - Specific repos to create worktrees for
**Default behavior (no `repos` specified):**
- Select "domain peers" - repos sharing at least one domain with the current repo
- A solo repo in a realm defaults to just itself
- Multiple domains: union of all peers
**Logging**: Tool response explains why repos were selected:
```json
{
"created": ["aperture", "fungal"],
"reason": "Domain peers via s3-access domain",
"paths": {
"aperture": "/Users/eric/.blue/worktrees/aperture/rfc-123/aperture",
"fungal": "/Users/eric/.blue/worktrees/aperture/rfc-123/fungal"
}
}
```
### Guided Workflow
All tools return `next_steps` suggestions based on state:
```json
{
"result": { ... },
"next_steps": [
"Run realm_check to validate changes",
"Contract s3-permissions was updated - review changes"
]
}
```
---
## Implementation Phases
### Phase 1: Core Tools
- `realm_status`, `realm_check`, `contract_get`
- Context detection from cwd
- Middleware for notification injection
### Phase 2: Session Tools
- `session_start`, `session_stop`
- Session-scoped context
- Notification lifecycle (pending → seen)
### Phase 3: Workflow Tools
- `worktree_create` with domain peer selection
- `pr_status` across worktrees
### Phase 4: Notifications
- `notifications_list` with state filters
- Schema hash detection in `realm_check`
- 7-day expiration cleanup
---
## Example Session
```
Human: what's the realm status?
Claude: [calls realm_status]
MCP returns:
{
"realm": "aperture",
"repos": ["blue", "fungal"],
"domains": [{
"name": "s3-access",
"contracts": [{"name": "s3-permissions", "version": "1.0.0", "owner": "blue"}],
"bindings": [
{"repo": "blue", "role": "provider"},
{"repo": "fungal", "role": "consumer"}
]
}],
"current_repo": "blue",
"session": null,
"notifications": [],
"next_steps": ["Start a session with session_start to track your work"]
}
Claude: You're in the blue repo, part of the aperture realm.
There's one domain (s3-access) where blue provides the s3-permissions
contract and fungal consumes it. No active session. Want me to start one?
```
---
## Resolved Design Decisions
| Question | Decision | Rationale |
|----------|----------|-----------|
| Tool granularity | 8 separate tools | Focused tools work better with LLMs; clear contracts |
| Notification delivery | Piggyback + explicit list | Natural discovery; no separate polling |
| Multi-realm | Single realm per repo | Simplicity; no real user need for multi-realm |
| Notification persistence | 7 days, pending→seen→expired | Balance between history and cleanup |
| Schema detection | Canonical JSON hash | Catches bugs without complex diffing |
| Worktree scope | Domain peers by default | Smart default; explicit override available |
## Deferred (2%)
- **Notification aggregation** - If a contract changes 5 times in rapid succession, should notifications be batched into 1 or sent as 5? Decide during implementation based on UX testing.

View file

@ -0,0 +1,143 @@
# Spike: Forgejo Access Without Tunnel
**Date:** 2026-01-24
**Status:** Complete
## Problem
Currently accessing Forgejo requires `kubectl port-forward`. We want direct HTTPS access at `git.beyondtheuniverse.superviber.com`.
## Current State
The infrastructure in `coherence-mcp/infra` is **designed but not fully wired**:
| Component | Status | Blocker |
|-----------|--------|---------|
| EKS Cluster | Unknown | Need to verify deployment |
| Forgejo Deployment | Designed | Depends on cluster |
| AWS ALB Ingress | Template | `${ACM_CERT_ARN}` placeholder |
| cert-manager | Template | `${ACME_EMAIL}`, `${DOMAIN}` placeholders |
| AWS LB Controller IAM | Designed | Controller not installed |
| DNS | Configured in PowerDNS | PowerDNS may not be deployed |
## Root Cause
The ingress at `kubernetes/ingress/core-services.yaml` uses:
```yaml
alb.ingress.kubernetes.io/certificate-arn: ${ACM_CERT_ARN}
```
This placeholder is never substituted. Additionally, the AWS Load Balancer Controller may not be installed.
## Options
### Option A: ACM + AWS ALB (Current Design)
**Pros:** Native AWS, managed TLS, WAF integration possible
**Cons:** Vendor lock-in, requires ACM setup, more moving parts
Steps:
1. Create ACM wildcard certificate for `*.beyondtheuniverse.superviber.com`
2. Install AWS Load Balancer Controller via Helm
3. Substitute `${ACM_CERT_ARN}` with actual ARN
4. Apply ingress
5. Point DNS to ALB
### Option B: NGINX Ingress + cert-manager + Let's Encrypt
**Pros:** Portable, auto-renewing certs, no ACM dependency
**Cons:** Different from current design, requires NGINX controller
Steps:
1. Install NGINX Ingress Controller
2. Configure cert-manager with Let's Encrypt
3. Create Certificate resources for domains
4. Update ingress to use NGINX class
5. Point DNS to NGINX LoadBalancer
### Option C: NLB + Pod TLS (Simplest)
**Pros:** Uses existing NLB, minimal changes, works today
**Cons:** TLS at pod level, can't share certs across services
Steps:
1. Add HTTPS (443) listener to existing NLB
2. Point to Forgejo on port 3000 (or configure Forgejo for 443)
3. Use cert-manager to provision TLS cert for Forgejo
4. Mount cert in Forgejo pod
5. Configure Forgejo for TLS termination
### Option D: Tailscale/Cloudflare Tunnel (Zero Infrastructure)
**Pros:** Works without public IP, easy setup, free tier
**Cons:** External dependency, not self-hosted
## Recommendation
Choose **Option A** for alignment with the existing production design. But first, verify the cluster state.
## Verification Steps
```bash
# 1. Check if cluster exists and accessible
aws eks describe-cluster --name alignment-production --region us-east-1
# 2. Check if kubectl works
kubectl get nodes
# 3. Check if Forgejo is deployed
kubectl get pods -n forgejo
# 4. Check if AWS LB Controller is installed
kubectl get pods -n kube-system | grep aws-load-balancer
# 5. Check if cert-manager is installed
kubectl get pods -n cert-manager
# 6. Check existing load balancers
aws elbv2 describe-load-balancers --region us-east-1
```
## Quick Fix (If Cluster Exists)
If the cluster is running but just missing the ALB setup:
```bash
# 1. Create ACM certificate
aws acm request-certificate \
--domain-name "*.beyondtheuniverse.superviber.com" \
--validation-method DNS \
--region us-east-1
# 2. Install AWS Load Balancer Controller
helm repo add eks https://aws.github.io/eks-charts
helm install aws-load-balancer-controller eks/aws-load-balancer-controller \
-n kube-system \
--set clusterName=alignment-production \
--set serviceAccount.create=false \
--set serviceAccount.name=aws-load-balancer-controller
# 3. Apply ingress with correct ARN
export ACM_CERT_ARN="arn:aws:acm:us-east-1:ACCOUNT:certificate/CERT_ID"
envsubst < kubernetes/ingress/core-services.yaml | kubectl apply -f -
# 4. Get ALB DNS name
kubectl get ingress -n ingress core-services -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
# 5. Point DNS (in PowerDNS or external DNS)
# Create CNAME: git.beyondtheuniverse.superviber.com -> ALB_DNS_NAME
```
## Questions for User
1. Is the EKS cluster currently deployed and running?
2. Do you have Route53 managing `superviber.com` or is it external DNS?
3. Is PowerDNS deployed and authoritative for the subdomain?
4. Do you prefer ACM (AWS managed) or Let's Encrypt (self-managed) for TLS?
## Next Steps
1. Run verification steps above
2. Choose option based on current cluster state
3. Implement chosen option
4. Update runbook to remove port-forward requirement