use hibitset::BitSet;
use std::borrow::Borrow;
use std::fmt;
use std::hash::Hash;
use std::marker::PhantomData;
use std::ops::{AddAssign, Sub};
use std::sync::{Arc, Mutex};
use uuid::Uuid;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use crate::cast::CastFrom;
/// A simple, monotonically increasing generator of typed ids.
///
/// Wraps an internal `u64` counter that starts at zero and advances by one on
/// every call to [`Gen::allocate_id`], converting each value into `Id`.
#[derive(Debug, Default, Clone)]
pub struct Gen<Id: From<u64> + Default> {
    id: u64,
    phantom: PhantomData<Id>,
}
impl<Id: From<u64> + Default> Gen<Id> {
    /// Produces the next id in the sequence.
    pub fn allocate_id(&mut self) -> Id {
        let current = self.id;
        self.id += 1;
        Id::from(current)
    }
}
/// A [`Gen`] that hands out raw `u64` ids.
pub type IdGen = Gen<u64>;
/// Marker trait bundling the bounds required of a type used as an allocated
/// id (constructible from small integers, ordered, hashable, serializable,
/// and displayable).
pub trait IdGenerator:
From<u8> + AddAssign + Sub + PartialOrd + Copy + Eq + Hash + Ord + serde::Serialize + fmt::Display
{
}
// Blanket implementation: any type meeting the bounds is an `IdGenerator`.
impl<T> IdGenerator for T where
T: From<u8>
+ AddAssign
+ Sub
+ PartialOrd
+ Copy
+ Eq
+ Hash
+ Ord
+ serde::Serialize
+ fmt::Display
{
}
/// A thread-safe, cloneable handle to a shared id-allocator implementation.
/// Cloning shares the same underlying pool via `Arc<Mutex<_>>`.
#[derive(Debug, Clone)]
pub struct IdAllocator<A: IdAllocatorInner>(pub Arc<Mutex<A>>);
/// The backing strategy for an [`IdAllocator`].
pub trait IdAllocatorInner: std::fmt::Debug + Send {
/// Short human-readable name of the implementation (e.g. `"hibitset"`).
const NAME: &'static str;
/// Creates an allocator handing out ids in `[min, max]`; every allocated id
/// is or'd with `mask` before being returned.
fn new(min: u32, max: u32, mask: u32) -> Self;
/// Allocates a currently unused id, or `None` if the range is exhausted.
fn alloc(&mut self) -> Option<u32>;
/// Returns a previously allocated `id` (including its mask bits) to the pool.
fn remove(&mut self, id: u32);
}
/// An [`IdAllocatorInner`] backed by a `hibitset::BitSet` of in-use ids,
/// probing from a randomly chosen starting candidate.
#[derive(Debug, Clone)]
pub struct IdAllocatorInnerBitSet {
// RNG used to pick the random starting candidate for each allocation.
next: StdRng,
// Smallest allocatable id (inclusive).
min: u32,
// Largest allocatable id (inclusive).
max: u32,
// Bits or'd into every allocated id, and stripped again in `remove`.
mask: u32,
// In-use ids, stored relative to `min` (i.e. bit `id - min` is set).
used: BitSet,
}
impl IdAllocatorInner for IdAllocatorInnerBitSet {
const NAME: &'static str = "hibitset";
fn new(min: u32, max: u32, mask: u32) -> Self {
// Ids are stored relative to `min`, so the largest stored offset is
// `max - min`. hibitset's 4-layer structure can address indexes up to
// BITS_PER_USIZE^4; assert the offsets fit.
let total = usize::cast_from(max - min);
assert!(total < BitSet::BITS_PER_USIZE.pow(4));
IdAllocatorInnerBitSet {
next: StdRng::from_entropy(),
min,
max,
mask,
used: BitSet::new(),
}
}
fn alloc(&mut self) -> Option<u32> {
// Pick a random starting candidate, then probe linearly (wrapping at
// `max` back to `min`) until a free id is found or we return to the
// starting point.
let range = self.min..=self.max;
let init = self.next.gen_range(range);
let mut next = init;
loop {
// Translate the candidate into its `min`-relative bitset offset.
let stored = next - self.min;
// `BitSet::add` returns whether the bit was already set, so a
// `false` return means `next` was free and is now marked in use.
if !self.used.add(stored) {
assert!(
next & self.mask == 0,
"chosen ID must not intersect with mask:\n{:#034b}\n{:#034b}",
next,
self.mask
);
// Callers receive the id with the mask bits or'd in.
return Some(next | self.mask);
}
// Wrap around at the top of the range.
next = if next == self.max { self.min } else { next + 1 };
if next == init {
// Probed the entire range: every id is in use.
return None;
}
}
}
fn remove(&mut self, id: u32) {
// Strip the mask bits that `alloc` or'd in, then clear the
// `min`-relative bit. (Note: assumes `id` was produced by `alloc`;
// an id below `min` would underflow here.)
let id = (!self.mask) & id;
let stored = id - self.min;
self.used.remove(stored);
}
}
impl<A: IdAllocatorInner> IdAllocator<A> {
    /// Creates a new `IdAllocator` handing out ids in `[min, max]`, each or'd
    /// with `mask` before being returned to callers.
    ///
    /// # Panics
    ///
    /// Panics if `min > max`, or if `mask` shares any bits with values in the
    /// allocatable range.
    pub fn new(min: u32, max: u32, mask: u32) -> IdAllocator<A> {
        assert!(min <= max);
        if mask != 0 && max > 0 {
            // All bits at or below `max`'s most significant bit. Computed via a
            // right shift of `u32::MAX` so it cannot overflow even when bit 31
            // of `max` is set, whereas `(1 << (max.ilog2() + 1)) - 1` would
            // attempt `1 << 32` (a debug panic, and a wrap to `1` in release
            // that silently disabled this check).
            let mask_check = u32::MAX >> (31 - max.ilog2());
            assert_eq!(mask & mask_check, 0, "max and mask share bits");
        }
        let inner = A::new(min, max, mask);
        IdAllocator(Arc::new(Mutex::new(inner)))
    }
    /// Allocates a new id wrapped in an [`IdHandle`]; the id returns to the
    /// pool when the last clone of the handle is dropped. Returns `None` if
    /// every id is in use.
    pub fn alloc(&self) -> Option<IdHandle<u32, A>> {
        let inner = Arc::new(internal::IdHandleInner::new(self)?);
        Some(IdHandle::Dynamic(inner))
    }
    // Allocates a raw id directly from the inner allocator.
    fn alloc_internal(&self) -> Option<u32> {
        let mut inner = self.0.lock().expect("lock poisoned");
        inner.alloc()
    }
    // Returns a raw id to the inner allocator (called from `IdHandleInner::drop`).
    fn free_internal(&self, id: u32) {
        let mut inner = self.0.lock().expect("lock poisoned");
        inner.remove(id);
    }
}
/// A handle to an id value.
///
/// `Static` ids are supplied directly by the caller and never freed;
/// `Dynamic` ids come from an [`IdAllocator`] and are returned to it when the
/// last clone of the handle is dropped.
#[derive(Debug)]
pub enum IdHandle<T, A: IdAllocatorInner> {
Static(T),
Dynamic(Arc<internal::IdHandleInner<T, A>>),
}
impl<T: Clone, A: IdAllocatorInner> Clone for IdHandle<T, A> {
    /// Clones the handle. For `Dynamic` handles this bumps the refcount on
    /// the shared inner state rather than allocating a fresh id.
    fn clone(&self) -> Self {
        match self {
            Self::Static(id) => Self::Static(id.clone()),
            Self::Dynamic(inner) => Self::Dynamic(Arc::clone(inner)),
        }
    }
}
impl<T: IdGenerator, A: IdAllocatorInner> IdHandle<T, A> {
    /// Returns a copy of the raw id, with none of the handle's bookkeeping.
    pub fn unhandled(&self) -> T {
        match self {
            Self::Static(id) => *id,
            Self::Dynamic(inner) => inner.id,
        }
    }
}
// Handles compare by their raw id value, regardless of variant, so a
// `Static(n)` equals a `Dynamic` handle holding the same `n`.
impl<T: IdGenerator, A: IdAllocatorInner> PartialEq for IdHandle<T, A> {
fn eq(&self, other: &Self) -> bool {
self.unhandled() == other.unhandled()
}
}
impl<T: IdGenerator, A: IdAllocatorInner> Eq for IdHandle<T, A> {}
impl<T: IdGenerator, A: IdAllocatorInner> PartialOrd for IdHandle<T, A> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
// Delegate to `Ord` so the two orderings can never disagree.
Some(self.cmp(other))
}
}
impl<T: IdGenerator, A: IdAllocatorInner> Ord for IdHandle<T, A> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.unhandled().cmp(&other.unhandled())
}
}
// Lets an `IdHandle<T, _>` be used where a `&T` is expected, e.g. for map
// lookups keyed by the raw id.
impl<T, A: IdAllocatorInner> Borrow<T> for IdHandle<T, A> {
fn borrow(&self) -> &T {
match self {
IdHandle::Static(id) => id,
IdHandle::Dynamic(inner) => &inner.id,
}
}
}
impl<T: IdGenerator, A: IdAllocatorInner> fmt::Display for IdHandle<T, A> {
    /// Formats exactly like the raw id value, forwarding the caller's
    /// formatter (so width/fill flags still apply).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let id = self.unhandled();
        fmt::Display::fmt(&id, f)
    }
}
impl<T: IdGenerator, A: IdAllocatorInner> serde::Serialize for IdHandle<T, A> {
    /// Serializes as the bare id value, indistinguishable from a plain `T`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let id = self.unhandled();
        id.serialize(serializer)
    }
}
mod internal {
use std::sync::Arc;
use crate::cast::CastFrom;
use crate::id_gen::{IdAllocator, IdAllocatorInner};
/// Shared state behind a dynamic id handle: the id plus the allocator it
/// came from, so the id can be returned to the pool on drop.
#[derive(Debug)]
pub struct IdHandleInner<T, A: IdAllocatorInner> {
pub(super) allocator: IdAllocator<A>,
// The allocated id converted into the caller-facing type `T`.
pub(super) id: T,
// The raw allocated value, retained so `Drop` can hand back exactly the
// value the allocator produced.
stored: u32,
}
impl<T, A: IdAllocatorInner> IdHandleInner<T, A>
where
T: CastFrom<u32>,
{
/// Allocates an id from `allocator`, or returns `None` if it is exhausted.
pub fn new(allocator: &IdAllocator<A>) -> Option<Self> {
let stored = allocator.alloc_internal()?;
Some(IdHandleInner {
// Keep a clone of the allocator handle so the id can be freed later.
allocator: IdAllocator(Arc::clone(&allocator.0)),
id: T::cast_from(stored),
stored,
})
}
}
impl<T, A: IdAllocatorInner> Drop for IdHandleInner<T, A> {
fn drop(&mut self) {
// Runs when the last `Arc` clone is dropped: return the id to the pool.
self.allocator.free_internal(self.stored);
}
}
}
/// Number of bits the 12-bit org id is shifted left within a connection id.
pub const ORG_ID_OFFSET: usize = 19;
/// Largest value representable in the low `ORG_ID_OFFSET` bits of a
/// connection id (i.e. the part below the org-id bits).
pub const MAX_ORG_ID: u32 = (1 << ORG_ID_OFFSET) - 1;
/// Extracts the low 12 bits of `uuid` and positions them above the
/// connection-id bits (shifted left by [`ORG_ID_OFFSET`]).
pub fn org_id_conn_bits(uuid: &Uuid) -> u32 {
    let bits = uuid.as_u128();
    let shifted = (bits & 0xFFF) << ORG_ID_OFFSET;
    // 12 bits shifted by 19 reaches at most bit 30, so this always fits in u32.
    u32::try_from(shifted).expect("must fit")
}
/// Renders the 12-bit org id embedded in `conn_id` (the bits above
/// [`ORG_ID_OFFSET`]) as exactly three uppercase hex digits.
pub fn conn_id_org_uuid(conn_id: u32) -> String {
    let org_id = (conn_id >> ORG_ID_OFFSET) & 0xFFF;
    // Values are at most 0xFFF, so zero-padded width 3 yields exactly three
    // uppercase hex characters — identical to the former lookup-table build.
    format!("{org_id:03X}")
}
/// Generates a short random id string: the final hyphen-separated group of a
/// fresh UUIDv4, reversed (the string is read back-to-front up to the first
/// `-`, preserving the original's character order).
pub fn temp_id() -> String {
    let full = uuid::Uuid::new_v4().as_hyphenated().to_string();
    let last_group = full.rsplit('-').next().expect("hyphenated uuid has groups");
    last_group.chars().rev().collect()
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use super::*;
    #[crate::test]
    fn test_conn_org() {
        let uuid = Uuid::parse_str("9e37ec59-56f4-450a-acbd-18ff14f10ca8").unwrap();
        let lower = org_id_conn_bits(&uuid);
        let org_lower_uuid = conn_id_org_uuid(lower);
        assert_eq!(org_lower_uuid, "CA8");
    }
    #[crate::test]
    fn test_id_gen() {
        test_ad_allocator::<IdAllocatorInnerBitSet>();
    }
    // Fixed: test entry points must be plain `fn()` items — the previous
    // `<A: IdAllocatorInner>` type parameter (which was unused; the body names
    // `IdAllocatorInnerBitSet` concretely) cannot compile under a test harness.
    #[crate::test]
    #[should_panic]
    fn test_mask_intersect() {
        let env_lower = org_id_conn_bits(&uuid::Uuid::from_u128(u128::MAX));
        // `mask` shares bits with `max`, so `IdAllocator::new` must panic.
        let ida = IdAllocator::<IdAllocatorInnerBitSet>::new(
            1 << ORG_ID_OFFSET,
            1 << ORG_ID_OFFSET,
            env_lower,
        );
        let id = ida.alloc().unwrap();
        assert_eq!(id.unhandled(), (0xfff << ORG_ID_OFFSET) | MAX_ORG_ID);
    }
    // Runs the full allocator test suite against one `IdAllocatorInner` impl.
    fn test_ad_allocator<A: IdAllocatorInner>() {
        test_id_alloc::<A>();
        test_static_id_sorting::<A>();
        test_id_reuse::<A>();
        test_display::<A>();
        test_map_lookup::<A>();
        test_serialization::<A>();
        test_mask::<A>();
        test_mask_envd::<A>();
    }
    fn test_mask<A: IdAllocatorInner>() {
        let ida = IdAllocator::<A>::new(1, 1, 0xfff << 20);
        let id = ida.alloc().unwrap();
        assert_eq!(id.unhandled(), (0xfff << 20) | 1);
    }
    fn test_mask_envd<A: IdAllocatorInner>() {
        let env_lower = org_id_conn_bits(&uuid::Uuid::from_u128(u128::MAX));
        let ida = IdAllocator::<A>::new(MAX_ORG_ID, MAX_ORG_ID, env_lower);
        let id = ida.alloc().unwrap();
        assert_eq!(id.unhandled(), (0xfff << ORG_ID_OFFSET) | MAX_ORG_ID);
    }
    fn test_id_alloc<A: IdAllocatorInner>() {
        let ida = IdAllocator::<A>::new(3, 5, 0);
        let id3 = ida.alloc().unwrap();
        let id4 = ida.alloc().unwrap();
        let id5 = ida.alloc().unwrap();
        assert_ne!(id3, id4);
        assert_ne!(id3, id5);
        assert_ne!(id4, id5);
        // Dropping a handle frees its id for reallocation.
        drop(id4);
        let _id4 = ida.alloc().unwrap();
        drop(id5);
        drop(id3);
        let _id5 = ida.alloc().unwrap();
        let _id3 = ida.alloc().unwrap();
        // All three ids are held again, so the pool must be exhausted.
        match ida.alloc() {
            Some(id) => panic!(
                "id allocator returned {}, not expected id exhaustion error",
                id
            ),
            None => (),
        }
    }
    fn test_static_id_sorting<A: IdAllocatorInner>() {
        let ida = IdAllocator::<A>::new(0, 0, 0);
        let id0 = ida.alloc().unwrap();
        let id1 = IdHandle::Static(1);
        assert!(id0 < id1);
        let ida = IdAllocator::<A>::new(1, 1, 0);
        let id0 = IdHandle::Static(0);
        let id1 = ida.alloc().unwrap();
        assert!(id0 < id1);
    }
    fn test_id_reuse<A: IdAllocatorInner>() {
        let allocator = IdAllocator::<A>::new(10, 11, 0);
        let id_a = allocator.alloc().unwrap();
        let a = id_a.unhandled();
        let id_a_clone = id_a.clone();
        // A clone keeps the id alive; dropping only the original must not
        // return it to the pool.
        drop(id_a);
        let _id_b = allocator.alloc().unwrap();
        assert!(allocator.alloc().is_none());
        // Dropping the last clone frees the id, which is then re-allocated.
        drop(id_a_clone);
        let id_c = allocator.alloc().unwrap();
        assert_eq!(id_c.unhandled(), a);
    }
    fn test_display<A: IdAllocatorInner>() {
        let allocator = IdAllocator::<A>::new(65_000, 65_000, 0);
        let id_a = allocator.alloc().unwrap();
        assert_eq!(id_a.unhandled(), 65_000);
        // A handle must display exactly like its raw id.
        let id_display = format!("{id_a}");
        let val_display = format!("{}", id_a.unhandled());
        assert_eq!(id_display, val_display);
    }
    fn test_map_lookup<A: IdAllocatorInner>() {
        let allocator = IdAllocator::<A>::new(99, 101, 0);
        let id_a = allocator.alloc().unwrap();
        let a = id_a.unhandled();
        // `Borrow<T>` lets the raw id be used as the lookup key.
        let mut btree = BTreeMap::new();
        btree.insert(id_a, "hello world");
        let entry = btree.remove(&a).unwrap();
        assert_eq!(entry, "hello world");
        assert!(btree.is_empty());
    }
    fn test_serialization<A: IdAllocatorInner>() {
        let allocator = IdAllocator::<A>::new(42, 42, 0);
        let id_a = allocator.alloc().unwrap();
        assert_eq!(id_a.unhandled(), 42);
        // A handle must serialize exactly like its raw id.
        let id_json = serde_json::to_string(&id_a).unwrap();
        let val_json = serde_json::to_string(&id_a.unhandled()).unwrap();
        assert_eq!(id_json, val_json);
    }
}