name: rust-async
description: Rust development with Tokio async runtime, cargo test, clippy, and systems programming patterns. Use when writing Rust code, agents, or system utilities.

Rust Development Skill

Project Structure

my-agent/
├── Cargo.toml
├── Cargo.lock
├── src/
│   ├── main.rs           # Binary entry point
│   ├── lib.rs            # Library root (if dual crate)
│   ├── config.rs         # Configuration handling
│   ├── error.rs          # Error types
│   ├── client/
│   │   ├── mod.rs
│   │   └── http.rs
│   └── discovery/
│       ├── mod.rs
│       ├── system.rs
│       └── network.rs
├── tests/
│   └── integration/
│       ├── main.rs           # Test target root (declares mod discovery_test; etc.)
│       └── discovery_test.rs
└── benches/
    └── performance.rs
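
In the dual-crate layout above, lib.rs typically just declares the modules and re-exports the common types so that integration tests and benches can link against the library crate (as the integration test below does with use my_agent::client::HttpClient). A minimal sketch:

// src/lib.rs
pub mod client;
pub mod config;
pub mod discovery;
pub mod error;

pub use config::Config;
pub use error::{AgentError, Result};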

Cargo Configuration

# Cargo.toml
[package]
name = "my-agent"
version = "0.1.0"
edition = "2021"
rust-version = "1.75"

[dependencies]
# Async runtime
tokio = { version = "1.35", features = ["full"] }

# HTTP client
reqwest = { version = "0.11", features = ["json", "socks"] }

# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

# Error handling
thiserror = "1.0"
anyhow = "1.0"

# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

# System info
sysinfo = "0.30"
hostname = "0.3"

[dev-dependencies]
tokio-test = "0.4"
mockall = "0.12"
tempfile = "3.10"

[profile.release]
opt-level = "z"     # Optimize for size
lto = true          # Link-time optimization
codegen-units = 1   # Better optimization
strip = true        # Strip symbols
panic = "abort"     # Smaller binary

[lints.rust]
unsafe_code = "forbid"

[lints.clippy]
all = "deny"
pedantic = "warn"
nursery = "warn"

Error Handling

Custom Error Types

// src/error.rs
use thiserror::Error;

#[derive(Error, Debug)]
pub enum AgentError {
    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Network error: {0}")]
    Network(#[from] reqwest::Error),

    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),

    #[error("Discovery failed: {message}")]
    Discovery { message: String, source: Option<Box<dyn std::error::Error + Send + Sync>> },

    #[error("Connection timeout after {duration_secs}s")]
    Timeout { duration_secs: u64 },
}

pub type Result<T> = std::result::Result<T, AgentError>;

Result Pattern Usage

// src/client/http.rs
use crate::error::{AgentError, Result};

#[derive(Clone)]
pub struct HttpClient {
    client: reqwest::Client,
    base_url: String,
}

impl HttpClient {
    pub fn new(base_url: &str, timeout_secs: u64) -> Result<Self> {
        let client = reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(timeout_secs))
            .build()
            .map_err(AgentError::Network)?;

        Ok(Self {
            client,
            base_url: base_url.to_string(),
        })
    }

    pub async fn get<T: serde::de::DeserializeOwned>(&self, path: &str) -> Result<T> {
        let url = format!("{}{}", self.base_url, path);

        let response = self
            .client
            .get(&url)
            .send()
            .await?
            .error_for_status()?
            .json::<T>()
            .await?;

        Ok(response)
    }

    pub async fn post<T, R>(&self, path: &str, body: &T) -> Result<R>
    where
        T: serde::Serialize,
        R: serde::de::DeserializeOwned,
    {
        let url = format!("{}{}", self.base_url, path);

        let response = self
            .client
            .post(&url)
            .json(body)
            .send()
            .await?
            .error_for_status()?
            .json::<R>()
            .await?;

        Ok(response)
    }
}

Async Patterns with Tokio

Main Entry Point

// src/main.rs
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

mod config;
mod error;
mod client;
mod discovery;

use crate::config::Config;
use crate::error::Result;

#[tokio::main]
async fn main() -> Result<()> {
    // Initialize tracing
    tracing_subscriber::registry()
        .with(tracing_subscriber::EnvFilter::new(
            std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()),
        ))
        .with(tracing_subscriber::fmt::layer())
        .init();

    let config = Config::from_env()?;

    tracing::info!(version = env!("CARGO_PKG_VERSION"), "Starting agent");

    run(config).await
}

async fn run(config: Config) -> Result<()> {
    let client = client::HttpClient::new(&config.server_url, config.timeout_secs)?;

    // Main loop with graceful shutdown
    let mut interval = tokio::time::interval(config.poll_interval);

    loop {
        tokio::select! {
            _ = interval.tick() => {
                if let Err(e) = discovery::collect_and_send(&client).await {
                    tracing::error!(error = %e, "Discovery cycle failed");
                }
            }
            _ = tokio::signal::ctrl_c() => {
                tracing::info!("Shutdown signal received");
                break;
            }
        }
    }

    Ok(())
}
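
The loop above delegates to discovery::collect_and_send, which is not shown in this skill. A minimal sketch, assuming the server exposes a JSON ingest endpoint; the /api/snapshots path and the acknowledgement shape are assumptions for illustration, not part of the skill:

// src/discovery/mod.rs (continued)
use crate::client::HttpClient;

/// Run one discovery cycle and ship the result to the server.
pub async fn collect_and_send(client: &HttpClient) -> Result<()> {
    let info = discover_all().await?;

    // Endpoint path and response body are assumptions for illustration.
    let _ack: serde_json::Value = client.post("/api/snapshots", &info).await?;

    Ok(())
}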

Concurrent Operations

// src/discovery/mod.rs
use tokio::task::JoinSet;
use crate::error::Result;

pub async fn discover_all() -> Result<SystemInfo> {
    let mut tasks = JoinSet::new();

    // Spawn concurrent discovery tasks
    tasks.spawn(async { ("cpu", discover_cpu().await) });
    tasks.spawn(async { ("memory", discover_memory().await) });
    tasks.spawn(async { ("disk", discover_disk().await) });
    tasks.spawn(async { ("network", discover_network().await) });

    let mut info = SystemInfo::default();

    // Collect results as they complete
    while let Some(result) = tasks.join_next().await {
        match result {
            Ok((name, Ok(data))) => {
                tracing::debug!(component = name, "Discovery completed");
                info.merge(name, data);
            }
            Ok((name, Err(e))) => {
                tracing::warn!(component = name, error = %e, "Discovery failed");
            }
            Err(e) => {
                tracing::error!(error = %e, "Task panicked");
            }
        }
    }

    Ok(info)
}
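
discover_all assumes a SystemInfo type with Default and a merge method, and because a JoinSet is homogeneous, every discover_* task must return the same payload type. One way to satisfy both, sketched with serde_json::Value as the shared payload; the field layout and method shape are assumptions:

// src/discovery/mod.rs (continued)
use serde::Serialize;
use std::collections::HashMap;

/// Aggregated discovery output, keyed by component name ("cpu", "memory", ...).
#[derive(Debug, Default, Serialize)]
pub struct SystemInfo {
    components: HashMap<String, serde_json::Value>,
}

impl SystemInfo {
    /// Merge one component's payload into the aggregate.
    pub fn merge(&mut self, name: &str, data: serde_json::Value) {
        self.components.insert(name.to_string(), data);
    }
}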

Testing Patterns

Unit Tests

// src/discovery/system.rs
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct CpuInfo {
    pub cores: usize,
    pub usage_percent: f32,
    pub model: String,
}

impl CpuInfo {
    pub fn is_high_usage(&self) -> bool {
        self.usage_percent > 80.0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn mock_cpu_info() -> CpuInfo {
        CpuInfo {
            cores: 4,
            usage_percent: 25.0,
            model: "Test CPU".to_string(),
        }
    }

    #[test]
    fn test_is_high_usage_returns_true_above_threshold() {
        let cpu = CpuInfo {
            usage_percent: 85.0,
            ..mock_cpu_info()
        };

        assert!(cpu.is_high_usage());
    }

    #[test]
    fn test_is_high_usage_returns_false_below_threshold() {
        let cpu = CpuInfo {
            usage_percent: 50.0,
            ..mock_cpu_info()
        };

        assert!(!cpu.is_high_usage());
    }

    #[test]
    fn test_is_high_usage_returns_false_at_boundary() {
        let cpu = CpuInfo {
            usage_percent: 80.0,
            ..mock_cpu_info()
        };

        assert!(!cpu.is_high_usage());
    }
}

Async Tests

// tests/integration/client_test.rs
use my_agent::client::HttpClient;

#[tokio::test]
async fn test_client_returns_error_for_unreachable_server() {
    let client = HttpClient::new("http://localhost:9999", 1).unwrap();

    let result: Result<serde_json::Value, _> = client.get("/test").await;

    assert!(result.is_err());
}

#[tokio::test]
async fn test_concurrent_requests_complete() {
    let client = HttpClient::new("https://httpbin.org", 30).unwrap();

    let handles: Vec<_> = (0..5)
        .map(|i| {
            let client = client.clone();
            tokio::spawn(async move {
                client.get::<serde_json::Value>(&format!("/delay/{}", i % 2)).await
            })
        })
        .collect();

    for handle in handles {
        let result = handle.await.unwrap();
        assert!(result.is_ok());
    }
}
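
The second test depends on httpbin.org being reachable; network-dependent tests like this are commonly marked #[ignore] and run on demand with cargo test -- --ignored so the default suite stays hermetic.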

Mocking with Mockall

// src/discovery/mod.rs
#[cfg_attr(test, mockall::automock)]
pub trait SystemDiscovery {
    fn get_cpu_info(&self) -> crate::error::Result<CpuInfo>;
    fn get_memory_info(&self) -> crate::error::Result<MemoryInfo>;
}

// src/discovery/collector.rs
pub struct Collector<D: SystemDiscovery> {
    discovery: D,
}

impl<D: SystemDiscovery> Collector<D> {
    pub fn new(discovery: D) -> Self {
        Self { discovery }
    }

    pub fn collect(&self) -> crate::error::Result<SystemSnapshot> {
        let cpu = self.discovery.get_cpu_info()?;
        let memory = self.discovery.get_memory_info()?;

        Ok(SystemSnapshot { cpu, memory })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use mockall::predicate::*;

    #[test]
    fn test_collector_combines_cpu_and_memory() {
        let mut mock = MockSystemDiscovery::new();

        mock.expect_get_cpu_info()
            .times(1)
            .returning(|| Ok(CpuInfo {
                cores: 4,
                usage_percent: 50.0,
                model: "Mock CPU".to_string(),
            }));

        mock.expect_get_memory_info()
            .times(1)
            .returning(|| Ok(MemoryInfo {
                total_gb: 16.0,
                available_gb: 8.0,
            }));

        let collector = Collector::new(mock);
        let snapshot = collector.collect().unwrap();

        assert_eq!(snapshot.cpu.cores, 4);
        assert_eq!(snapshot.memory.total_gb, 16.0);
    }
}
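
The collector example above references MemoryInfo and SystemSnapshot, which are not defined in this skill. A minimal sketch consistent with the values used in the test; the field names and numeric types are assumptions:

// src/discovery/system.rs (continued)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct MemoryInfo {
    pub total_gb: f64,
    pub available_gb: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemSnapshot {
    pub cpu: CpuInfo,
    pub memory: MemoryInfo,
}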

Configuration

// src/config.rs
use crate::error::{AgentError, Result};
use std::time::Duration;

#[derive(Debug, Clone)]
pub struct Config {
    pub server_url: String,
    pub timeout_secs: u64,
    pub poll_interval: Duration,
    pub agent_id: String,
}

impl Config {
    pub fn from_env() -> Result<Self> {
        let server_url = std::env::var("SERVER_URL")
            .map_err(|_| AgentError::Config("SERVER_URL not set".to_string()))?;

        let timeout_secs = std::env::var("TIMEOUT_SECS")
            .unwrap_or_else(|_| "30".to_string())
            .parse()
            .map_err(|_| AgentError::Config("Invalid TIMEOUT_SECS".to_string()))?;

        let poll_interval_secs: u64 = std::env::var("POLL_INTERVAL_SECS")
            .unwrap_or_else(|_| "60".to_string())
            .parse()
            .map_err(|_| AgentError::Config("Invalid POLL_INTERVAL_SECS".to_string()))?;

        let agent_id = std::env::var("AGENT_ID")
            .unwrap_or_else(|_| hostname::get()
                .map(|h| h.to_string_lossy().to_string())
                .unwrap_or_else(|_| "unknown".to_string()));

        Ok(Self {
            server_url,
            timeout_secs,
            poll_interval: Duration::from_secs(poll_interval_secs),
            agent_id,
        })
    }
}
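
Config::from_env is straightforward to unit-test by manipulating the environment. Because environment variables are process-global, tests like these should be run serially (for example with cargo test -- --test-threads=1, or the serial_test crate, which is not in the dev-dependencies above). A sketch:

// src/config.rs (continued)
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_from_env_fails_without_server_url() {
        std::env::remove_var("SERVER_URL");

        assert!(matches!(Config::from_env(), Err(AgentError::Config(_))));
    }

    #[test]
    fn test_from_env_applies_defaults() {
        std::env::set_var("SERVER_URL", "http://localhost:8080");
        std::env::remove_var("TIMEOUT_SECS");
        std::env::remove_var("POLL_INTERVAL_SECS");

        let config = Config::from_env().unwrap();

        assert_eq!(config.timeout_secs, 30);
        assert_eq!(config.poll_interval, Duration::from_secs(60));
    }
}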

Clippy Configuration

# clippy.toml
cognitive-complexity-threshold = 10
too-many-arguments-threshold = 5
type-complexity-threshold = 200
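
With all denied and pedantic/nursery as warnings, individual findings can still be silenced at the item level rather than relaxing the whole group; for example (the lint and item here are illustrative):

// Narrow, documented override instead of loosening the global lint level.
#[allow(clippy::module_name_repetitions)]
pub struct DiscoveryError {
    // ...
}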

Commands

# Building
cargo build                     # Debug build
cargo build --release           # Release build
cargo build --target x86_64-unknown-linux-musl  # Static binary

# Testing
cargo test                      # Run all tests
cargo test -- --nocapture       # Show println! output
cargo test test_name            # Run specific test
cargo test --test integration   # Run integration tests only

# Linting
cargo clippy                    # Run clippy
cargo clippy -- -D warnings     # Treat warnings as errors
cargo clippy --fix              # Auto-fix issues

# Formatting
cargo fmt                       # Format code
cargo fmt --check               # Check formatting

# Other
cargo doc --open                # Generate and open docs
cargo bench                     # Run benchmarks
cargo tree                      # Show dependency tree
cargo audit                     # Check for vulnerabilities
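
The musl build assumes the target is installed first (rustup target add x86_64-unknown-linux-musl), and cargo audit requires the cargo-audit binary (cargo install cargo-audit); neither ships with a default toolchain.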

Anti-Patterns to Avoid

// BAD: Unwrap without context
let config = Config::from_env().unwrap();

// GOOD: Provide context or use ?
let config = Config::from_env()
    .expect("Failed to load configuration from environment");

// Or in functions returning Result:
let config = Config::from_env()?;


// BAD: Clone when not needed
fn process(data: &Vec<String>) {
    let cloned = data.clone();  // Unnecessary allocation
    for item in cloned.iter() {
        println!("{}", item);
    }
}

// GOOD: Use references
fn process(data: &[String]) {
    for item in data {
        println!("{}", item);
    }
}


// BAD: String concatenation in loop
let mut result = String::new();
for item in items {
    result = result + &item + ", ";  // Allocates each iteration
}

// GOOD: Use push_str or join
let result = items.join(", ");


// BAD: Blocking in async context
async fn fetch_data() {
    std::thread::sleep(Duration::from_secs(1));  // Blocks runtime!
}

// GOOD: Use async sleep
async fn fetch_data() {
    tokio::time::sleep(Duration::from_secs(1)).await;
}


// BAD: Ignoring errors silently
let _ = file.write_all(data);

// GOOD: Handle or propagate errors
file.write_all(data)?;
// Or log if truly ignorable:
if let Err(e) = file.write_all(data) {
    tracing::warn!(error = %e, "Failed to write data");
}