When to use each and cycle prevention
In Rust, ownership is usually exclusive - one owner, one value. But sometimes you need multiple owners for the same data. That's where Rc (Reference Counted) and Arc (Atomic Reference Counted) come in.
let data = vec![1, 2, 3];
let owner1 = data; // data moved here
let owner2 = data; // ERROR: data already moved!
The Solution:
use std::rc::Rc;
let data = Rc::new(vec![1, 2, 3]);
let owner1 = Rc::clone(&data); // Reference count: 2
let owner2 = Rc::clone(&data); // Reference count: 3
// All owners can read the same data
| Feature | Rc | Arc |
|---------|---------|----------|
| Thread-Safe | ❌ No | ✅ Yes |
| Performance | Faster | Slower (atomic ops) |
| Use Case | Single-threaded | Multi-threaded |
| Overhead | ~2 words | ~2 words + atomics |
| Clone Cost | ~1-2 cycles | ~10-20 cycles |
Rule of Thumb:
- Use Rc in single-threaded code.
- Use Arc when sharing across threads.
- Rc doesn't implement Send or Sync, so the compiler will catch mistakes.

use std::rc::Rc;
use std::cell::RefCell;
/// A tree node that can have multiple parents (shared ownership)
/// Common in DOM trees, AST, graph structures
#[derive(Debug)]
pub struct Node {
pub id: String,
pub data: String,
pub children: RefCell<Vec<Rc<Node>>>,
}
impl Node {
    /// Build a node and wrap it in `Rc` so several parents can own it.
    pub fn new(id: impl Into<String>, data: impl Into<String>) -> Rc<Self> {
        let node = Node {
            id: id.into(),
            data: data.into(),
            children: RefCell::new(Vec::new()),
        };
        Rc::new(node)
    }
    /// Attach `child` under this node (interior mutability via RefCell).
    pub fn add_child(&self, child: Rc<Node>) {
        let mut kids = self.children.borrow_mut();
        kids.push(child);
    }
    /// Print this subtree, indenting one space per level of depth.
    pub fn print_tree(&self, depth: usize) {
        let indent = " ".repeat(depth);
        println!("{}{}: {}", indent, self.id, self.data);
        self.children
            .borrow()
            .iter()
            .for_each(|child| child.print_tree(depth + 1));
    }
}
// Build a component tree (like React)
fn build_component_tree() {
    // Root of the UI tree.
    let app = Node::new("App", "Application Root");
    // Widgets that will be attached below the root.
    let header = Node::new("Header", "Site Header");
    let main = Node::new("Main", "Main Content");
    let article = Node::new("Article", "Blog Post");
    // Wire up the hierarchy.
    app.add_child(Rc::clone(&header));
    app.add_child(Rc::clone(&main));
    main.add_child(Rc::clone(&article));
    // The same header node can hang off more than one parent.
    let sidebar = Node::new("Sidebar", "Navigation");
    sidebar.add_child(Rc::clone(&header)); // Header shared!
    app.add_child(sidebar);
    println!("Component tree:");
    app.print_tree(0);
    // One local handle + two parents = strong count of 3.
    println!("\nHeader ref count: {}", Rc::strong_count(&header)); // Should be 3
}
use std::sync::Arc;
use std::thread;
use std::time::Duration;
/// Application configuration shared across threads
#[derive(Debug, Clone)]
pub struct Config {
pub database_url: String,
pub max_connections: usize,
pub timeout_secs: u64,
pub api_keys: Vec<String>,
}
impl Config {
    /// Load configuration once and wrap it in `Arc` for cross-thread sharing.
    pub fn load() -> Arc<Self> {
        let config = Config {
            database_url: String::from("postgres://localhost/db"),
            max_connections: 100,
            timeout_secs: 30,
            api_keys: vec![String::from("key1"), String::from("key2")],
        };
        Arc::new(config)
    }
}
/// Worker thread that uses shared config
fn worker_thread(id: usize, config: Arc<Config>) {
println!("Worker {} starting with config:", id);
println!(" DB: {}", config.database_url);
println!(" Max connections: {}", config.max_connections);
// Simulate work
thread::sleep(Duration::from_millis(100));
println!("Worker {} completed", id);
}
// Usage: Share config across thread pool
fn shared_config_example() {
    let config = Config::load();
    // Spawn 4 workers; each thread moves in its own Arc handle to the config.
    let handles: Vec<_> = (0..4)
        .map(|i| {
            let cfg = Arc::clone(&config);
            thread::spawn(move || worker_thread(i, cfg))
        })
        .collect();
    // Join every worker before reporting.
    for handle in handles {
        handle.join().unwrap();
    }
    // All per-thread clones have been dropped by now.
    println!("\nConfig ref count: {}", Arc::strong_count(&config)); // Back to 1
}
use std::sync::Arc;
use std::sync::Mutex;
use std::collections::VecDeque;
use std::time::Duration;
/// Database connection (expensive to create)
pub struct DbConnection {
pub id: usize,
// In real code: TCP socket, authentication, etc.
}
impl DbConnection {
    /// Simulate an expensive connection handshake (~100 ms sleep).
    fn new(id: usize) -> Self {
        println!("Creating connection {}", id);
        std::thread::sleep(Duration::from_millis(100));
        Self { id }
    }
    /// Pretend to run a SQL statement; returns a description of the call.
    fn query(&self, sql: &str) -> String {
        format!("Connection {}: executing {}", self.id, sql)
    }
}
/// Connection pool shared across threads.
///
/// `available` holds idle connections ready for reuse; `created_count`
/// tracks how many connections were ever created so `max_connections`
/// can be enforced.
pub struct ConnectionPool {
    available: Mutex<VecDeque<Arc<DbConnection>>>,
    max_connections: usize,
    created_count: Mutex<usize>,
}
impl ConnectionPool {
    /// Create an empty pool that lazily creates up to `max_connections`.
    pub fn new(max_connections: usize) -> Arc<Self> {
        Arc::new(ConnectionPool {
            available: Mutex::new(VecDeque::new()),
            max_connections,
            created_count: Mutex::new(0),
        })
    }
    /// Get a connection from the pool.
    ///
    /// Reuses an idle connection when possible, otherwise creates a new one
    /// up to `max_connections`. Returns `Err("Pool exhausted")` when the
    /// limit is reached and nothing is idle.
    pub fn acquire(&self) -> Result<Arc<DbConnection>, &'static str> {
        // Fast path: reuse an idle connection. The guard is scoped so the
        // lock is released before any slow work below.
        {
            let mut available = self.available.lock().unwrap();
            if let Some(conn) = available.pop_front() {
                return Ok(conn);
            }
        }
        // BUG FIX: the original held the `created_count` lock across the
        // expensive `DbConnection::new` (~100 ms), serializing every
        // concurrent creator. Reserve a slot (and id) under the lock, then
        // drop the guard before doing the slow connection setup.
        let id = {
            let mut count = self.created_count.lock().unwrap();
            if *count >= self.max_connections {
                return Err("Pool exhausted");
            }
            *count += 1;
            *count
        };
        Ok(Arc::new(DbConnection::new(id)))
    }
    /// Return connection to pool so another caller can reuse it.
    pub fn release(&self, conn: Arc<DbConnection>) {
        let mut available = self.available.lock().unwrap();
        available.push_back(conn);
    }
}
// Usage across threads
fn connection_pool_example() {
    let pool = ConnectionPool::new(3);
    // Five threads compete for at most three connections.
    let handles: Vec<_> = (0..5)
        .map(|i| {
            let pool = Arc::clone(&pool);
            std::thread::spawn(move || match pool.acquire() {
                Ok(conn) => {
                    println!("Thread {} got connection", i);
                    let result = conn.query("SELECT * FROM users");
                    println!("Thread {}: {}", i, result);
                    // Hand the connection back for reuse.
                    pool.release(conn);
                }
                Err(e) => {
                    println!("Thread {} failed: {}", i, e);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
use std::sync::{Arc, Mutex};
// The combo pattern: Arc for shared ownership, Mutex for mutation
type SharedState<T> = Arc<Mutex<T>>;
/// Fresh shared counter starting at zero.
fn create_shared_counter() -> SharedState<i32> {
    Arc::new(Mutex::new(0))
}
/// Add one to the counter while holding the lock.
fn increment_counter(counter: SharedState<i32>) {
    let mut guard = counter.lock().unwrap();
    *guard += 1;
}
This is the most common Arc usage pattern in production code.
use std::sync::Arc;
use std::cell::RefCell;
/// Shared configuration (immutable)
struct AppConfig {
name: String,
version: String,
}
/// Thread-local state (mutable)
struct ThreadState {
request_count: usize,
error_count: usize,
}
/// Worker with shared config + thread-local state
struct Worker {
config: Arc<AppConfig>, // Shared across threads
state: RefCell<ThreadState>, // Thread-local (not Send!)
}
impl Worker {
    /// Build a worker around an already-shared config handle.
    fn new(config: Arc<AppConfig>) -> Self {
        let state = ThreadState {
            request_count: 0,
            error_count: 0,
        };
        Worker {
            config,
            state: RefCell::new(state),
        }
    }
    /// Handle one request: bump the local counter; log every 100th request.
    fn process_request(&self) {
        println!("Processing request for {}", self.config.name);
        // Mutate thread-local state through the RefCell.
        let mut state = self.state.borrow_mut();
        state.request_count += 1;
        // Config is shared and read-only; no locking required.
        if state.request_count % 100 == 0 {
            println!("Processed {} requests (version {})",
                state.request_count,
                self.config.version);
        }
    }
}
// Simplified Rc implementation
// NOTE(review): illustrative only — needs `use std::cell::Cell;` and
// `use std::sync::atomic::AtomicUsize;` to compile, and omits Drop/Deref
// (so counts here are never decremented).
struct Rc<T> {
// Raw pointer to the shared, heap-allocated box holding counts + value.
ptr: *const RcBox<T>,
}
// The heap allocation that all Rc clones point at.
struct RcBox<T> {
strong_count: Cell<usize>, // Reference count
weak_count: Cell<usize>, // Weak reference count
value: T, // The actual data
}
impl<T> Clone for Rc<T> {
// Cloning copies the pointer and bumps the strong count — the value
// itself is never copied.
fn clone(&self) -> Self {
// Increment reference count (1-2 CPU cycles)
// SAFETY assumption in this sketch: `ptr` is always valid while any
// Rc exists; the real std implementation upholds this via Drop.
unsafe {
(*self.ptr).strong_count.set(
(*self.ptr).strong_count.get() + 1
);
}
// Return new Rc pointing to same data
Rc { ptr: self.ptr }
}
}
// Arc's box is identical except the counters are atomic, making the
// increments safe across threads (at the cost of atomic operations).
struct ArcBox<T> {
strong_count: AtomicUsize, // Thread-safe counter
weak_count: AtomicUsize,
value: T,
}
// Arc::clone() is ~10x slower than Rc::clone() due to atomics
// But still very fast (~10-20 cycles)
Stack:                   Heap:
┌──────────┐            ┌───────────────────┐
│ Rc<T>    │──────────▶ │ strong_count: 3   │
│   ptr    │            │ weak_count: 0     │
└──────────┘            │ value: T          │
                        └───────────────────┘
                            ▲
┌──────────┐                │
│ Rc<T>    │────────────────┘
│   ptr    │
└──────────┘
Size:
- Rc = one pointer (8 bytes on 64-bit)
- RcBox = 2 counters + T (~16 bytes + sizeof(T))

use std::rc::Rc;
use std::cell::RefCell;
// Self-referential list node: each node optionally owns the next one.
struct Node {
next: Option<Rc<RefCell<Node>>>,
}
// Create a cycle
// NOTE(review): these `let` statements are a snippet — they must live inside
// a function body to compile (with `use std::rc::Rc;` / `use std::cell::RefCell;`).
let node1 = Rc::new(RefCell::new(Node { next: None }));
let node2 = Rc::new(RefCell::new(Node { next: Some(Rc::clone(&node1)) }));
node1.borrow_mut().next = Some(Rc::clone(&node2));
// Cycle: node1 -> node2 -> node1
// Both have ref count 2, neither will be dropped!
Solution: Use Weak to break cycles (covered in next pattern)
// BAD: Using Arc in single-threaded code
use std::sync::Arc;
fn single_threaded() {
let data = Arc::new(vec![1, 2, 3]); // Unnecessary atomics!
// ...
}
// GOOD: Use Rc for single-threaded
// NOTE(review): this is the alternative to the function above — the two
// `single_threaded` definitions are mutually exclusive and cannot coexist
// in one module.
use std::rc::Rc;
fn single_threaded() {
let data = Rc::new(vec![1, 2, 3]); // Faster!
// ...
}
// BAD: Cloning Arc in tight loop
for _ in 0..1_000_000 {
let clone = Arc::clone(&data);
// Atomic increment/decrement on every iteration!
}
// GOOD: Clone once, reuse
let data_ref = Arc::clone(&data);
for _ in 0..1_000_000 {
// Use data_ref
}
// BAD: Mutex when data is read-only
let config = Arc::new(Mutex::new(Config { ... }));
// Every access requires locking:
let data = config.lock().unwrap();
// GOOD: No Mutex for immutable data
let config = Arc::new(Config { ... });
// Direct access, no locking:
println!("{}", config.database_url);
// Useful for debugging leaks
let data = Rc::new(vec![1, 2, 3]);
let clone1 = Rc::clone(&data);
let clone2 = Rc::clone(&data);
println!("Strong count: {}", Rc::strong_count(&data)); // 3
// If this is unexpectedly high, you might have a leak!
| Operation | Rc | Arc | &mut T or Box |
|-----------|-------|--------|--------|
| Clone | ~2 cycles | ~10-20 cycles | N/A (can't clone) |
| Deref | 1 cycle | 1 cycle | 1 cycle |
| Drop | ~2 cycles | ~10-20 cycles | 0 cycles |
| Memory | +16 bytes | +16 bytes | +0 bytes |
| Thread-Safe | ❌ | ✅ | ✅ |
Conclusion — exercises:
1. Build a read-only cache shared across multiple components using Rc.
   Hint: Rc<...>
2. Create a metrics collector shared across a thread pool using Arc.
   Hint: Arc<Mutex<...>>
3. Implement a generic resource pool using Arc.
   Hint: Arc<Mutex<Vec<Arc<...>>>>

Real-world examples:
- Tokio — heavily uses Arc for sharing runtime state across tasks. (View on GitHub)
- Axum — uses Arc for application state and middleware. (View on GitHub)
- Servo — uses Rc/Arc for DOM nodes and layout trees. (View on GitHub)

Run this code in the official Rust Playground.