should be good to extend functionality now

psun256
2025-12-08 14:31:59 -05:00
parent 07cb45fa73
commit a3f50c1f0a
8 changed files with 174 additions and 84 deletions


@@ -1,71 +1,58 @@
-use std::collections::HashMap;
+use core::fmt;
 use std::net::SocketAddr;
 use std::sync::RwLock;
 use std::sync::Arc;
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-pub struct BackendPool {
-    pub backends: Arc<RwLock<HashMap<String, Arc<Backend>>>>,
-}
+use std::sync::atomic::{AtomicUsize, Ordering};
+#[derive(Debug)]
 pub struct Backend {
     pub id: String,
     pub address: SocketAddr,
-    pub is_healthy: AtomicBool, // no clue how this should work, for now
-    pub current_load: AtomicUsize, // no clue how this should work, for now
-}
-impl BackendPool {
-    pub fn new(initial_backends: Vec<Arc<Backend>>) -> Self {
-        let mut map = HashMap::new();
-        for backend in initial_backends {
-            map.insert(backend.id.clone(), backend);
-        }
-        Self {
-            backends: Arc::new(RwLock::new(map)),
-        }
-    }
-    pub fn add_backend(&self, backend: Arc<Backend>) {
-        let mut backends_guard = self.backends
-            .write()
-            .expect("BackendPool lock poisoned");
-        // let backends_guard = self.backends.read().unwrap_or_else(|poisoned| poisoned.into_inner());
-        backends_guard.insert(backend.id.clone(), backend);
-    }
-    pub fn get_backend(&self, id: &str) -> Option<Arc<Backend>> {
-        let backends_guard = self.backends
-            .read()
-            .expect("BackendPool lock poisoned");
-        // let backends_guard = self.backends.read().unwrap_or_else(|poisoned| poisoned.into_inner());
-        backends_guard.get(id).cloned()
-    }
-    pub fn bruh_amogus_sus(&self) {
-        for k in self.backends.read().unwrap().keys() {
-            self.backends.write().unwrap().get(k).unwrap().increment_current_load();
-        }
-    }
+    pub active_connections: AtomicUsize,
 }
 impl Backend {
     pub fn new(id: String, address: SocketAddr) -> Self {
         Self {
-            id: id,
-            address: address,
-            is_healthy: AtomicBool::new(false),
-            current_load: AtomicUsize::new(0),
+            id: id.to_string(),
+            address,
+            active_connections: AtomicUsize::new(0),
         }
     }
-    pub fn increment_current_load(&self) {
-        self.current_load.fetch_add(1, Ordering::SeqCst);
+    // Ordering::Relaxed means these ops could be reordered, but since this
+    // is just a metric and we assume the underlying system is sane enough
+    // not to behave poorly, SeqCst is probably overkill.
+    pub fn inc_connections(&self) {
+        self.active_connections.fetch_add(1, Ordering::Relaxed);
+        println!("{} has {} connections open", self.id, self.active_connections.load(Ordering::Relaxed));
     }
-    pub fn decrement_current_load(&self) {
-        self.current_load.fetch_sub(1, Ordering::SeqCst);
+    pub fn dec_connections(&self) {
+        self.active_connections.fetch_sub(1, Ordering::Relaxed);
+        println!("{} has {} connections open", self.id, self.active_connections.load(Ordering::Relaxed));
     }
 }
+impl fmt::Display for Backend {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{} ({})", self.address, self.id)
+    }
+}
+#[derive(Clone, Debug)]
+pub struct BackendPool {
+    pub backends: Arc<RwLock<Vec<Arc<Backend>>>>,
+}
+impl BackendPool {
+    pub fn new() -> Self {
+        BackendPool {
+            backends: Arc::new(RwLock::new(Vec::new())),
+        }
+    }
+    pub fn add(&self, backend: Backend) {
+        self.backends.write().unwrap().push(Arc::new(backend));
+    }
+}
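
For orientation, here is a minimal sketch of how a caller might exercise the refactored types after this change. It is not part of the commit: the module path in the commented-out use, the backend ids and addresses, and the least-connections selection policy are all assumptions.

// Hypothetical usage of the refactored Backend / BackendPool types.
// Assumes they are in scope; the commit does not show the file or module name.
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::Ordering;

// use crate::backend::{Backend, BackendPool}; // hypothetical module path

fn main() {
    let pool = BackendPool::new();
    pool.add(Backend::new("b1".to_string(), "127.0.0.1:8081".parse::<SocketAddr>().unwrap()));
    pool.add(Backend::new("b2".to_string(), "127.0.0.1:8082".parse::<SocketAddr>().unwrap()));

    // Hypothetical selection policy: pick the backend with the fewest active connections.
    let chosen: Arc<Backend> = {
        let backends = pool.backends.read().unwrap();
        backends
            .iter()
            .min_by_key(|b| b.active_connections.load(Ordering::Relaxed))
            .cloned()
            .expect("pool is empty")
    };

    chosen.inc_connections(); // prints "b1 has 1 connections open"
    // ... proxy the request here ...
    chosen.dec_connections(); // prints "b1 has 0 connections open"

    println!("routed via {}", chosen); // Display impl: "127.0.0.1:8081 (b1)"
}

Because active_connections is only ever read as a point-in-time metric, the Relaxed increments in the diff are sufficient: each fetch_add is still atomic, so counts are not lost; only ordering relative to other memory operations is given up.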