Update bnum to 0.14.3 (#45)

Rather involved update due to it being the foundation of the physics.

Reviewed-on: #45
Co-authored-by: Rhys Lloyd <krakow20@gmail.com>
Co-committed-by: Rhys Lloyd <krakow20@gmail.com>
This commit was merged in pull request #45.
This commit is contained in:
2026-03-24 18:02:17 +00:00
committed by Rhys Lloyd
parent 9b6be8bc68
commit 48316a677a
22 changed files with 771 additions and 579 deletions

View File

@@ -1,21 +1,22 @@
use strafesnet_common::integer::Planar64;
use strafesnet_common::{model,integer};
use strafesnet_common::integer::{vec3::Vector3,Fixed,Ratio};
use strafesnet_common::integer::fixed_types::F192_96;
use strafesnet_common::integer::vec3::Vector3;
use strafesnet_common::integer::{Planar64,Planar64Vec3,Ratio};
use crate::{valve_transform_normal,valve_transform_dist};
#[derive(Hash,Eq,PartialEq)]
struct Face{
normal:integer::Planar64Vec3,
normal:Planar64Vec3,
dot:Planar64,
}
#[derive(Debug)]
struct Faces{
faces:Vec<Vec<integer::Planar64Vec3>>,
faces:Vec<Vec<Planar64Vec3>>,
}
fn solve3(c0:&Face,c1:&Face,c2:&Face)->Option<Ratio<Vector3<Fixed<3,96>>,Fixed<3,96>>>{
fn solve3(c0:&Face,c1:&Face,c2:&Face)->Option<Ratio<Vector3<F192_96>,F192_96>>{
let n0_n1=c0.normal.cross(c1.normal);
let det=c2.normal.dot(n0_n1);
if det.abs().is_zero(){
@@ -82,12 +83,12 @@ fn planes_to_faces(face_list:std::collections::HashSet<Face>)->Result<Faces,Plan
// test if any *other* faces occlude the intersection
for new_face in &face_list{
// new face occludes intersection point
if (new_face.dot.widen_2()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
if (new_face.dot.widen_128()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
// replace one of the faces with the new face
// don't try to replace face0 because we are exploring that face in particular
if let Some(new_intersection)=solve3(face0,new_face,face2){
// face1 does not occlude (or intersect) the new intersection
if (face1.dot.widen_2()/Planar64::ONE).gt_ratio(face1.normal.dot(new_intersection.num)/new_intersection.den){
if (face1.dot.widen_128()/Planar64::ONE).gt_ratio(face1.normal.dot(new_intersection.num)/new_intersection.den){
face1=new_face;
intersection=new_intersection;
continue 'find;
@@ -95,7 +96,7 @@ fn planes_to_faces(face_list:std::collections::HashSet<Face>)->Result<Faces,Plan
}
if let Some(new_intersection)=solve3(face0,face1,new_face){
// face2 does not occlude (or intersect) the new intersection
if (face2.dot.widen_2()/Planar64::ONE).gt_ratio(face2.normal.dot(new_intersection.num)/new_intersection.den){
if (face2.dot.widen_128()/Planar64::ONE).gt_ratio(face2.normal.dot(new_intersection.num)/new_intersection.den){
face2=new_face;
intersection=new_intersection;
continue 'find;
@@ -120,7 +121,7 @@ fn planes_to_faces(face_list:std::collections::HashSet<Face>)->Result<Faces,Plan
continue;
}
// new_face occludes intersection meaning intersection is not on convex solid and face0 is degenerate
if (new_face.dot.widen_2()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
if (new_face.dot.widen_128()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
// abort! reject face0 entirely
continue 'face;
}
@@ -138,7 +139,7 @@ fn planes_to_faces(face_list:std::collections::HashSet<Face>)->Result<Faces,Plan
loop{
// push point onto vertices
// problem: this may push a vertex that does not fit in the fixed point range and is thus meaningless
face.push(intersection.divide().narrow_1().unwrap());
face.push(intersection.divide().narrow_64().unwrap());
// we looped back around to face1, we're done!
if core::ptr::eq(face1,face2){
@@ -204,7 +205,7 @@ impl std::fmt::Display for BrushToMeshError{
}
impl core::error::Error for BrushToMeshError{}
pub fn faces_to_mesh(faces:Vec<Vec<integer::Planar64Vec3>>)->model::Mesh{
pub fn faces_to_mesh(faces:Vec<Vec<Planar64Vec3>>)->model::Mesh{
// generate the mesh
let mut mb=model::MeshBuilder::new();
let color=mb.acquire_color_id(glam::Vec4::ONE);

View File

@@ -62,7 +62,7 @@ impl Aabb{
self.min.map_zip(self.max,|(min,max)|min.midpoint(max))
}
#[inline]
pub fn area_weight(&self)->fixed_wide::fixed::Fixed<2,64>{
pub fn area_weight(&self)->fixed_wide::types::F128_64{
let d=self.max-self.min;
d.x*d.y+d.y*d.z+d.z*d.x
}

View File

@@ -66,7 +66,7 @@ impl JumpImpulse{
_mass:Planar64,
)->Planar64Vec3{
match self{
&JumpImpulse::Time(time)=>velocity-(*gravity*time).map(|t|t.divide().clamp_1()),
&JumpImpulse::Time(time)=>velocity-(*gravity*time).map(|t|t.divide().clamp_64()),
&JumpImpulse::Height(height)=>{
//height==-v.y*v.y/(2*g.y);
//use energy to determine max height
@@ -74,10 +74,10 @@ impl JumpImpulse{
let g=gg.sqrt();
let v_g=gravity.dot(velocity);
//do it backwards
let radicand=v_g*v_g+(g*height*2).widen_4();
velocity-(*gravity*(radicand.sqrt().wrap_2()+v_g)/gg).divide().clamp_1()
let radicand=v_g*v_g+(g*height*2).widen_256();
velocity-(*gravity*(radicand.sqrt().wrap_128()+v_g)/gg).divide().clamp_64()
},
&JumpImpulse::Linear(jump_speed)=>velocity+(jump_dir*jump_speed/jump_dir.length()).divide().clamp_1(),
&JumpImpulse::Linear(jump_speed)=>velocity+(jump_dir*jump_speed/jump_dir.length()).divide().clamp_64(),
&JumpImpulse::Energy(_energy)=>{
//calculate energy
//let e=gravity.dot(velocity);
@@ -91,10 +91,10 @@ impl JumpImpulse{
pub fn get_jump_deltav(&self,gravity:&Planar64Vec3,mass:Planar64)->Planar64{
//gravity.length() is actually the proper calculation because the jump is always opposite the gravity direction
match self{
&JumpImpulse::Time(time)=>(gravity.length().wrap_1()*time/2).divide().clamp_1(),
&JumpImpulse::Height(height)=>(gravity.length()*height*2).sqrt().wrap_1(),
&JumpImpulse::Time(time)=>(gravity.length().wrap_64()*time/2).divide().clamp_64(),
&JumpImpulse::Height(height)=>(gravity.length()*height*2).sqrt().wrap_64(),
&JumpImpulse::Linear(deltav)=>deltav,
&JumpImpulse::Energy(energy)=>(energy.sqrt()*2/mass.sqrt()).divide().clamp_1(),
&JumpImpulse::Energy(energy)=>(energy.sqrt()*2/mass.sqrt()).divide().clamp_64(),
}
}
}
@@ -126,10 +126,10 @@ impl JumpSettings{
None=>rel_velocity,
};
let j=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2();
let js=jump_speed.widen_128();
if j<js{
//weak booster: just do a regular jump
boost_vel+jump_dir.with_length(js-j).divide().wrap_1()
boost_vel+jump_dir.with_length(js-j).divide().wrap_64()
}else{
//activate booster normally, jump does nothing
boost_vel
@@ -142,13 +142,13 @@ impl JumpSettings{
None=>rel_velocity,
};
let j=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2();
let js=jump_speed.widen_128();
if j<js{
//speed in direction of jump cannot be lower than amount
boost_vel+jump_dir.with_length(js-j).divide().wrap_1()
boost_vel+jump_dir.with_length(js-j).divide().wrap_64()
}else{
//boost and jump add together
boost_vel+jump_dir.with_length(js).divide().wrap_1()
boost_vel+jump_dir.with_length(js).divide().wrap_64()
}
}
(false,JumpCalculation::Max)=>{
@@ -159,10 +159,10 @@ impl JumpSettings{
None=>rel_velocity,
};
let boost_dot=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2();
let js=jump_speed.widen_128();
if boost_dot<js{
//weak boost is extended to jump speed
boost_vel+jump_dir.with_length(js-boost_dot).divide().wrap_1()
boost_vel+jump_dir.with_length(js-boost_dot).divide().wrap_64()
}else{
//activate booster normally, jump does nothing
boost_vel
@@ -174,7 +174,7 @@ impl JumpSettings{
Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity,
};
boost_vel+jump_dir.with_length(jump_speed).divide().wrap_1()
boost_vel+jump_dir.with_length(jump_speed).divide().wrap_64()
},
}
}
@@ -267,9 +267,9 @@ pub struct StrafeSettings{
impl StrafeSettings{
pub fn tick_velocity(&self,velocity:Planar64Vec3,control_dir:Planar64Vec3)->Option<Planar64Vec3>{
let d=velocity.dot(control_dir);
let mv=self.mv.widen_2();
let mv=self.mv.widen_128();
match d<mv{
true=>Some(velocity+(control_dir*self.air_accel_limit.map_or(mv-d,|limit|limit.widen_2().min(mv-d))).wrap_1()),
true=>Some(velocity+(control_dir*self.air_accel_limit.map_or(mv-d,|limit|limit.widen_128().min(mv-d))).wrap_64()),
false=>None,
}
}
@@ -290,7 +290,7 @@ pub struct PropulsionSettings{
}
impl PropulsionSettings{
pub fn acceleration(&self,control_dir:Planar64Vec3)->Planar64Vec3{
(control_dir*self.magnitude).clamp_1()
(control_dir*self.magnitude).clamp_64()
}
}
@@ -310,13 +310,13 @@ pub struct WalkSettings{
impl WalkSettings{
pub fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{
//TODO: fallible walk accel
let diff_len=target_diff.length().wrap_1();
let diff_len=target_diff.length().wrap_64();
let friction=if diff_len<self.accelerate.topspeed{
self.static_friction
}else{
self.kinetic_friction
};
self.accelerate.accel.min((-gravity.y*friction).clamp_1())
self.accelerate.accel.min((-gravity.y*friction).clamp_64())
}
pub fn get_walk_target_velocity(&self,control_dir:Planar64Vec3,normal:Planar64Vec3)->Planar64Vec3{
if control_dir==crate::integer::vec3::zero(){
@@ -332,7 +332,7 @@ impl WalkSettings{
if cr==crate::integer::vec3::zero(){
crate::integer::vec3::zero()
}else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_1()
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_64()
}
}else{
crate::integer::vec3::zero()
@@ -341,7 +341,7 @@ impl WalkSettings{
pub fn is_slope_walkable(&self,normal:Planar64Vec3,up:Planar64Vec3)->bool{
//normal is not guaranteed to be unit length
let ny=normal.dot(up);
let h=normal.length().wrap_1();
let h=normal.length().wrap_64();
//remember this is a normal vector
ny.is_positive()&&h*self.surf_dot<ny
}
@@ -368,13 +368,13 @@ impl LadderSettings{
let nnmm=nn*mm;
let d=normal.dot(control_dir);
let mut dd=d*d;
if (self.dot*self.dot*nnmm).clamp_4()<dd{
if (self.dot*self.dot*nnmm).clamp_256()<dd{
if d.is_negative(){
control_dir=Planar64Vec3::new([Planar64::ZERO,mm.clamp_1(),Planar64::ZERO]);
control_dir=Planar64Vec3::new([Planar64::ZERO,mm.clamp_64(),Planar64::ZERO]);
}else{
control_dir=Planar64Vec3::new([Planar64::ZERO,-mm.clamp_1(),Planar64::ZERO]);
control_dir=Planar64Vec3::new([Planar64::ZERO,-mm.clamp_64(),Planar64::ZERO]);
}
dd=(normal.y*normal.y).widen_4();
dd=(normal.y*normal.y).widen_256();
}
//n=d if you are standing on top of a ladder and press E.
//two fixes:
@@ -385,7 +385,7 @@ impl LadderSettings{
if cr==crate::integer::vec3::zero(){
crate::integer::vec3::zero()
}else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_1()
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_64()
}
}else{
crate::integer::vec3::zero()
@@ -417,7 +417,7 @@ impl Hitbox{
}
pub fn source()->Self{
Self{
halfsize:((int3(33,73,33)>>1)*VALVE_SCALE).narrow_1().unwrap(),
halfsize:((int3(33,73,33)>>1)*VALVE_SCALE).narrow_64().unwrap(),
mesh:HitboxMesh::Box,
}
}
@@ -538,11 +538,11 @@ impl StyleModifiers{
tick_rate:Ratio64::new(100,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_1().unwrap()),
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_64().unwrap()),
calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true,
}),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_1().unwrap(),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_64().unwrap(),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
@@ -565,7 +565,7 @@ impl StyleModifiers{
magnitude:int(12),//?
}),
hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_1().unwrap(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_64().unwrap(),
}
}
pub fn source_surf()->Self{
@@ -574,16 +574,16 @@ impl StyleModifiers{
controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(),
air_accel_limit:Some((int(150)*66*VALVE_SCALE).narrow_1().unwrap()),
air_accel_limit:Some((int(150)*66*VALVE_SCALE).narrow_64().unwrap()),
mv:Planar64::raw(30<<28),
tick_rate:Ratio64::new(66,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_1().unwrap()),
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_64().unwrap()),
calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true,
}),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_1().unwrap(),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_64().unwrap(),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
@@ -606,7 +606,7 @@ impl StyleModifiers{
magnitude:int(12),//?
}),
hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_1().unwrap(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_64().unwrap(),
}
}
}

View File

@@ -1,6 +1,12 @@
pub use fixed_wide::fixed::*;
pub use ratio_ops::ratio::{Ratio,Divide,Parity};
pub mod fixed_types{
pub use fixed_wide::types::*;
}
use fixed_wide::types::F128_64;
//integer units
/// specific example of a "default" time type
@@ -68,7 +74,7 @@ impl<T> Time<T>{
impl<T> From<Planar64> for Time<T>{
#[inline]
fn from(value:Planar64)->Self{
Self::raw((value*Planar64::raw(1_000_000_000)).clamp_1().to_raw())
Self::raw((value*Planar64::raw(1_000_000_000)).clamp_64().to_raw())
}
}
impl<T> From<Time<T>> for Ratio<Planar64,Planar64>{
@@ -134,10 +140,10 @@ impl_time_additive_assign_operator!(core::ops::AddAssign,add_assign);
impl_time_additive_assign_operator!(core::ops::SubAssign,sub_assign);
impl_time_additive_assign_operator!(core::ops::RemAssign,rem_assign);
impl<T> std::ops::Mul for Time<T>{
type Output=Ratio<Fixed<2,64>,Fixed<2,64>>;
type Output=Ratio<F128_64,F128_64>;
#[inline]
fn mul(self,rhs:Self)->Self::Output{
Ratio::new(Fixed::raw(self.0)*Fixed::raw(rhs.0),Fixed::raw_digit(1_000_000_000i64.pow(2)))
Ratio::new(Fixed::raw(self.0)*Fixed::raw(rhs.0),Fixed::from_u64(1_000_000_000u64.pow(2)))
}
}
macro_rules! impl_time_i64_rhs_operator {
@@ -156,7 +162,7 @@ impl_time_i64_rhs_operator!(Mul,mul);
impl_time_i64_rhs_operator!(Shr,shr);
impl_time_i64_rhs_operator!(Shl,shl);
impl<T> core::ops::Mul<Time<T>> for Planar64{
type Output=Ratio<Fixed<2,64>,Planar64>;
type Output=Ratio<F128_64,Planar64>;
#[inline]
fn mul(self,rhs:Time<T>)->Self::Output{
Ratio::new(self*Fixed::raw(rhs.0),Planar64::raw(1_000_000_000))
@@ -177,6 +183,7 @@ impl<T> From<Time<T>> for f64{
#[cfg(test)]
mod test_time{
use super::*;
use fixed_wide::types::F64_32;
type Time=AbsoluteTime;
#[test]
fn time_from_planar64(){
@@ -191,13 +198,13 @@ mod test_time{
#[test]
fn time_squared(){
let a=Time::from_secs(2);
assert_eq!(a*a,Ratio::new(Fixed::<2,64>::raw_digit(1_000_000_000i64.pow(2))*4,Fixed::<2,64>::raw_digit(1_000_000_000i64.pow(2))));
assert_eq!(a*a,Ratio::new(F128_64::from_u64(1_000_000_000u64.pow(2))*4,F128_64::from_u64(1_000_000_000u64.pow(2))));
}
#[test]
fn time_times_planar64(){
let a=Time::from_secs(2);
let b=Planar64::from(2);
assert_eq!(b*a,Ratio::new(Fixed::<2,64>::raw_digit(1_000_000_000*(1<<32))<<2,Fixed::<1,32>::raw_digit(1_000_000_000)));
assert_eq!(b*a,Ratio::new(F128_64::from_u64(1_000_000_000*(1<<32))<<2,F64_32::from_u64(1_000_000_000)));
}
}
@@ -565,8 +572,8 @@ fn angle_sin_cos(){
println!("cordic s={} c={}",(s/h).divide(),(c/h).divide());
let (fs,fc)=f.sin_cos();
println!("float s={} c={}",fs,fc);
assert!(close_enough((c/h).divide().wrap_1(),Planar64::raw((fc*((1u64<<32) as f64)) as i64)));
assert!(close_enough((s/h).divide().wrap_1(),Planar64::raw((fs*((1u64<<32) as f64)) as i64)));
assert!(close_enough((c/h).divide().wrap_64(),Planar64::raw((fc*((1u64<<32) as f64)) as i64)));
assert!(close_enough((s/h).divide().wrap_64(),Planar64::raw((fs*((1u64<<32) as f64)) as i64)));
}
test_angle(1.0);
test_angle(std::f64::consts::PI/4.0);
@@ -598,7 +605,7 @@ impl TryFrom<[f32;3]> for Unit32Vec3{
*/
pub type Planar64TryFromFloatError=FixedFromFloatError;
pub type Planar64=fixed_wide::types::I32F32;
pub type Planar64=fixed_wide::types::F64_32;
pub type Planar64Vec3=linear_ops::types::Vector3<Planar64>;
pub type Planar64Mat3=linear_ops::types::Matrix3<Planar64>;
pub mod vec3{
@@ -677,8 +684,8 @@ pub mod mat3{
let (yc,ys)=y.cos_sin();
Planar64Mat3::from_cols([
Planar64Vec3::new([xc,Planar64::ZERO,-xs]),
Planar64Vec3::new([(xs*ys).wrap_1(),yc,(xc*ys).wrap_1()]),
Planar64Vec3::new([(xs*yc).wrap_1(),-ys,(xc*yc).wrap_1()]),
Planar64Vec3::new([(xs*ys).wrap_64(),yc,(xc*ys).wrap_64()]),
Planar64Vec3::new([(xs*yc).wrap_64(),-ys,(xc*yc).wrap_64()]),
])
}
#[inline]
@@ -719,8 +726,8 @@ impl Planar64Affine3{
}
}
#[inline]
pub fn transform_point3(&self,point:Planar64Vec3)->vec3::Vector3<Fixed<2,64>>{
self.translation.widen_2()+self.matrix3*point
pub fn transform_point3(&self,point:Planar64Vec3)->vec3::Vector3<F128_64>{
self.translation.widen_128()+self.matrix3*point
}
}
impl Into<glam::Mat4> for Planar64Affine3{

View File

@@ -14,7 +14,7 @@ wide-mul=[]
zeroes=["dep:arrayvec"]
[dependencies]
bnum = "0.13.0"
bnum = "0.14.3"
arrayvec = { version = "0.7.6", optional = true }
paste = "1.0.15"
ratio_ops = { workspace = true, optional = true }

View File

@@ -1,54 +1,58 @@
use bnum::{BInt,cast::As};
use bnum::{Int,cast::As,n};
const BNUM_DIGIT_WIDTH:usize=64;
pub(crate)const BNUM_DIGIT_WIDTH:usize=8;
const DIGIT_SHIFT:u32=BNUM_DIGIT_WIDTH.ilog2();
#[derive(Clone,Copy,Default,Hash,PartialEq,Eq,PartialOrd,Ord)]
/// A Fixed point number for which multiply operations widen the bits in the output. (when the wide-mul feature is enabled)
/// N is the number of u64s to use
/// F is the number of fractional bits (always N*32 lol)
/// N is the number of u8s to use
/// F is the number of fractional bits (currently always N*8/2)
pub struct Fixed<const N:usize,const F:usize>{
bits:BInt<{N}>,
bits:Int<N>,
}
impl<const N:usize,const F:usize> Fixed<N,F>{
pub const MAX:Self=Self::from_bits(BInt::<N>::MAX);
pub const MIN:Self=Self::from_bits(BInt::<N>::MIN);
pub const ZERO:Self=Self::from_bits(BInt::<N>::ZERO);
pub const EPSILON:Self=Self::from_bits(BInt::<N>::ONE);
pub const NEG_EPSILON:Self=Self::from_bits(BInt::<N>::NEG_ONE);
pub const ONE:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32));
pub const TWO:Self=Self::from_bits(BInt::<N>::TWO.shl(F as u32));
pub const HALF:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32-1));
pub const NEG_ONE:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32));
pub const NEG_TWO:Self=Self::from_bits(BInt::<N>::NEG_TWO.shl(F as u32));
pub const NEG_HALF:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32-1));
pub const MAX:Self=Self::from_bits(Int::<N>::MAX);
pub const MIN:Self=Self::from_bits(Int::<N>::MIN);
pub const ZERO:Self=Self::from_bits(n!(0));
pub const EPSILON:Self=Self::from_bits(n!(1));
pub const NEG_EPSILON:Self=Self::from_bits(n!(-1));
pub const ONE:Self=Self::from_bits(n!(1).shl(F as u32));
pub const TWO:Self=Self::from_bits(n!(2).shl(F as u32));
pub const HALF:Self=Self::from_bits(n!(1).shl(F as u32-1));
pub const NEG_ONE:Self=Self::from_bits(n!(-1).shl(F as u32));
pub const NEG_TWO:Self=Self::from_bits(n!(-2).shl(F as u32));
pub const NEG_HALF:Self=Self::from_bits(n!(-1).shl(F as u32-1));
}
impl<const N:usize,const F:usize> Fixed<N,F>{
#[inline]
pub const fn from_bits(bits:BInt::<N>)->Self{
pub const fn from_bits(bits:Int::<N>)->Self{
Self{
bits,
}
}
#[inline]
pub const fn to_bits(self)->BInt<N>{
pub const fn to_bits(self)->Int<N>{
self.bits
}
#[inline]
pub const fn as_bits(&self)->&BInt<N>{
pub const fn as_bits(&self)->&Int<N>{
&self.bits
}
#[inline]
pub const fn as_bits_mut(&mut self)->&mut BInt<N>{
pub const fn as_bits_mut(&mut self)->&mut Int<N>{
&mut self.bits
}
#[inline]
pub const fn raw_digit(value:i64)->Self{
let mut digits=[0u64;N];
digits[0]=value.abs() as u64;
//sign bit
digits[N-1]|=(value&i64::MIN) as u64;
Self::from_bits(BInt::from_bits(bnum::BUint::from_digits(digits)))
pub const fn from_u64(value:u64)->Self{
let mut digits=Self::ZERO;
let bytes=value.to_ne_bytes();
let mut digit=0;
while digit<N&&digit<bytes.len(){
digits.as_bits_mut().as_bytes_mut()[digit]=bytes[digit];
digit+=1;
}
digits
}
#[inline]
pub const fn is_zero(self)->bool{
@@ -99,26 +103,25 @@ impl<const N:usize,const F:usize> Fixed<N,F>{
}
}
}
impl<const F:usize> Fixed<1,F>{
impl<const F:usize> Fixed<{64/BNUM_DIGIT_WIDTH},F>{
/// My old code called this function everywhere so let's provide it
#[inline]
pub const fn raw(value:i64)->Self{
Self::from_bits(BInt::from_bits(bnum::BUint::from_digit(value as u64)))
Self::from_bits(Int::from_bytes(value.to_ne_bytes()))
}
#[inline]
pub const fn to_raw(self)->i64{
let &[digit]=self.to_bits().to_bits().digits();
digit as i64
i64::from_le_bytes(self.to_bits().to_bytes())
}
}
macro_rules! impl_from {
macro_rules! impl_from{
($($from:ty),*)=>{
$(
impl<const N:usize,const F:usize> From<$from> for Fixed<N,F>{
#[inline]
fn from(value:$from)->Self{
Self::from_bits(BInt::<{N}>::from(value)<<F as u32)
Self::from_bits(value.as_::<Int::<{N}>>()<<F as u32)
}
}
)*
@@ -147,84 +150,153 @@ impl<const N:usize,const F:usize> std::iter::Sum for Fixed<N,F>{
}
}
const fn signed_shift(lhs:u64,rhs:i32)->u64{
if rhs.is_negative(){
lhs>>-rhs
}else{
lhs<<rhs
}
}
macro_rules! impl_into_float {
( $output: ty, $unsigned:ty, $exponent_bits:expr, $mantissa_bits:expr ) => {
macro_rules! impl_into_float{
($output:ty,$unsigned:ty,$mantissa_msb:expr,$bias:expr) => {
impl<const N:usize,const F:usize> Into<$output> for Fixed<N,F>{
#[inline]
fn into(self)->$output{
const DIGIT_SHIFT:u32=6;//Log2[64]
// SBBB BBBB
// 1001 1110 0000 0000
let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
let unsigned=self.bits.unsigned_abs();
let most_significant_bit=unsigned.bits();
let exp=if unsigned.is_zero(){
0
}else{
let msb=most_significant_bit as $unsigned;
let _127=((1 as $unsigned)<<($exponent_bits-1))-1;
let msb_offset=msb+_127-1-F as $unsigned;
msb_offset<<($mantissa_bits-1)
// most_significant_bit is the "index" of the most significant bit.
// 0b0000_0000.msb()==0 (but we just special case return 0.0)
// 0b0000_0001.msb()==0
// 0b1000_0000.msb()==7
let Some(most_significant_bit)=unsigned.bit_width().checked_sub(1)else{
return 0.0;
};
let digits=unsigned.digits();
let digit_index=most_significant_bit.saturating_sub(1)>>DIGIT_SHIFT;
let digit=digits[digit_index as usize];
//How many bits does the mantissa take from this digit
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let mut unmasked_mant=signed_shift(digit,rest_of_mantissa) as $unsigned;
if 0<rest_of_mantissa&&digit_index!=0{
//take the next digit down and shove some of its bits onto the bottom of the mantissa
let digit=digits[digit_index as usize-1];
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let unmasked_mant2=signed_shift(digit,rest_of_mantissa) as $unsigned;
unmasked_mant|=unmasked_mant2;
// sign
let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
// exp
let msb=most_significant_bit as $unsigned;
let msb_offset=msb+$bias-F as $unsigned;
let exp=msb_offset<<$mantissa_msb;
// mant
let digits=unsigned.to_bytes();
// Copy digits into mantissa
let mut m_bytes=[0u8;_];
let mant_unmasked;
const MOD8:usize=((1<<DIGIT_SHIFT)-1);
const MANT_REM:usize=$mantissa_msb&MOD8;
const NEG_MANT_REM:usize=(1<<DIGIT_SHIFT)-MANT_REM;
if $mantissa_msb<most_significant_bit{
// lsb of mantissa is higher than lsb of fixed point
// Copy bytes (f64)
// CASE 0:
// F64_32 [00000000,00011111,11111111,11111111,11111111,11111111,11111111,11111111,...]
// u64 [00011111,11111111,11111111,11111111,11111111,11111111,11111111,00000000]
// msb%8=4
// i_m=1
// CASE 1:
// F64_32 [00000000,00111111,11111111,11111111,11111111,11111111,11111111,11111110,...]
// u64 [00111111,11111111,11111111,11111111,11111111,11111111,11111110,00000000]
// CASE 2:
// F64_32 [00000000,01111111,11111111,11111111,11111111,11111111,11111111,11111100,...]
// u64 [01111111,11111111,11111111,11111111,11111111,11111111,11111100,00000000]
// CASE 3:
// F64_32 [00000000,11111111,11111111,11111111,11111111,11111111,11111111,11111000,...]
// u64 [11111111,11111111,11111111,11111111,11111111,11111111,11111000,00000000]
// CASE 4:
// F64_32 [00000001,11111111,11111111,11111111,11111111,11111111,11111111,11110000,...]
// u64 [00000001,11111111,11111111,11111111,11111111,11111111,11111111,11110000]
// CASE 5:
// F64_32 [00000011,11111111,11111111,11111111,11111111,11111111,11111111,11100000,...]
// u64 [00000011,11111111,11111111,11111111,11111111,11111111,11111111,11100000]
// CASE 6:
// F64_32 [00000111,11111111,11111111,11111111,11111111,11111111,11111111,11000000,...]
// u64 [00000111,11111111,11111111,11111111,11111111,11111111,11111111,11000000]
// CASE 7:
// F64_32 [00001111,11111111,11111111,11111111,11111111,11111111,11111111,10000000,...]
// u64 [00001111,11111111,11111111,11111111,11111111,11111111,11111111,10000000]
//
// Copy bytes (f32)
// CASE 0:
// F64_32 [00000000,01111111,11111111,11111111,...]
// u32 [01111111,11111111,11111111,00000000]
// msb%8=4
// i_m=1
// CASE 1:
// F64_32 [00000000,11111111,11111111,11111110,...]
// u32 [11111111,11111111,11111110,00000000]
// CASE 2:
// F64_32 [00000001,11111111,11111111,11111100,...]
// u32 [00000001,11111111,11111111,11111100]
// CASE 3:
// F64_32 [00000011,11111111,11111111,11111000,...]
// u32 [00000011,11111111,11111111,11111000]
// CASE 4:
// F64_32 [00000111,11111111,11111111,11110000,...]
// u32 [00000111,11111111,11111111,11110000]
// CASE 5:
// F64_32 [00001111,11111111,11111111,11100000,...]
// u32 [00001111,11111111,11111111,11100000]
// CASE 6:
// F64_32 [00011111,11111111,11111111,11000000,...]
// u32 [00011111,11111111,11111111,11000000]
// CASE 7:
// F64_32 [00111111,11111111,11111111,10000000,...]
// u32 [00111111,11111111,11111111,10000000]
let right_shift=most_significant_bit as usize-$mantissa_msb;
let mut i_m=((most_significant_bit as usize&MOD8)+NEG_MANT_REM)>>DIGIT_SHIFT;
let mut i_d=right_shift>>DIGIT_SHIFT;
while i_m<m_bytes.len()&&i_d<N{
m_bytes[i_m]=digits[i_d];
i_m+=1;
i_d+=1;
}
let unsigned=<$unsigned>::from_le_bytes(m_bytes);
let right_shift=((right_shift+MANT_REM)&MOD8)+NEG_MANT_REM;
mant_unmasked=unsigned>>right_shift;
}else{
// lsb of mantissa is lower than lsb of fixed point
// [0,0,0,0,0b0100_0000,0,0,0]
// [0,0b0001_0000,0,0,0,0,0,0]<<e
let left_shift=$mantissa_msb-most_significant_bit as usize;
let mut i_m=left_shift>>DIGIT_SHIFT;
let mut i_d=0;
while i_m<m_bytes.len()&&i_d<N{
m_bytes[i_m]=digits[i_d];
i_m+=1;
i_d+=1;
}
mant_unmasked=<$unsigned>::from_le_bytes(m_bytes)<<(left_shift&MOD8);
}
let mant=unmasked_mant&((1 as $unsigned)<<($mantissa_bits-1))-1;
let mant=mant_unmasked&(((1 as $unsigned)<<$mantissa_msb)-1);
let bits=sign|exp|mant;
<$output>::from_bits(bits)
}
}
}
}
impl_into_float!(f32,u32,8,24);
impl_into_float!(f64,u64,11,53);
impl_into_float!(f32,u32,23,127);
impl_into_float!(f64,u64,52,1023);
#[inline]
fn integer_decode_f32(f: f32) -> (u64, i16, bool) {
fn integer_decode_f32(f: f32) -> (u32, u8, bool) {
let bits: u32 = f.to_bits();
let sign: bool = bits & (1<<31) != 0;
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let exponent = (bits >> 23) & 0xff;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
(mantissa, exponent as u8, sign)
}
#[inline]
fn integer_decode_f64(f: f64) -> (u64, i16, bool) {
fn integer_decode_f64(f: f64) -> (u64, u16, bool) {
let bits: u64 = f.to_bits();
let sign: bool = bits & (1u64<<63) != 0;
let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
let exponent = (bits >> 52) & 0x7ff;
let mantissa = if exponent == 0 {
(bits & 0xfffffffffffff) << 1
} else {
(bits & 0xfffffffffffff) | 0x10000000000000
};
// Exponent bias + mantissa shift
exponent -= 1023 + 52;
(mantissa, exponent, sign)
(mantissa, exponent as u16, sign)
}
#[derive(Debug,Eq,PartialEq)]
pub enum FixedFromFloatError{
@@ -246,13 +318,12 @@ impl core::fmt::Display for FixedFromFloatError{
write!(f,"{self:?}")
}
}
macro_rules! impl_from_float {
( $decode:ident, $input: ty, $mantissa_bits:expr ) => {
macro_rules! impl_from_float{
($decode:ident,$input:ty,$mantissa_bits:expr,$bias:expr)=>{
impl<const N:usize,const F:usize> TryFrom<$input> for Fixed<N,F>{
type Error=FixedFromFloatError;
#[inline]
fn try_from(value:$input)->Result<Self,Self::Error>{
const DIGIT_SHIFT:u32=6;
match value.classify(){
std::num::FpCategory::Nan=>Err(FixedFromFloatError::Nan),
std::num::FpCategory::Infinite=>Err(FixedFromFloatError::Infinite),
@@ -261,25 +332,62 @@ macro_rules! impl_from_float {
|std::num::FpCategory::Normal
=>{
let (m,e,s)=$decode(value);
let mut digits=[0u64;N];
let most_significant_bit=e as i32+$mantissa_bits as i32+F as i32;
if most_significant_bit<0{
let mut digits=[0u8;N];
let msb_biased=e as usize+F+1;
if msb_biased<$bias{
return Err(FixedFromFloatError::Underflow);
};
if N*BNUM_DIGIT_WIDTH+$bias<=msb_biased{
return Err(FixedFromFloatError::Overflow);
}
let digit_index=most_significant_bit>>DIGIT_SHIFT;
let digit=digits.get_mut(digit_index as usize).ok_or(FixedFromFloatError::Overflow)?;
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=-($mantissa_bits as i32-(take_bits as i32));
*digit=signed_shift(m,rest_of_mantissa);
if rest_of_mantissa<0&&digit_index!=0{
//we don't care if some float bits are partially truncated
if let Some(digit)=digits.get_mut((digit_index-1) as usize){
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=-($mantissa_bits as i32-(take_bits as i32));
*digit=signed_shift(m,rest_of_mantissa);
}
let lsb=msb_biased as isize-$bias-$mantissa_bits;
// underflow is ok, we only need to know the alignment
let lsb_alignment=lsb&((1<<DIGIT_SHIFT)-1);
// the 53 bit mantissa has room to shift by 0-7 bits
let aligned_mantissa=m<<lsb_alignment;
let m_bytes=aligned_mantissa.to_le_bytes();
let digit_index=lsb>>DIGIT_SHIFT;
let mut i_m;
let mut i_d;
if digit_index<0{
// lsb of mantissa is lower than lsb of fixed point
// [0,0,0,0]<<e
// [0,0,0,1,0,0,0,0]
i_m=-digit_index as usize;
i_d=0;
}else{
// lsb of mantissa is higher than lsb of fixed point
// [0,0,0,0]<<e
// [0,0,0,1,0,0,0,0]
i_m=0;
i_d=digit_index as usize;
}
let bits=BInt::from_bits(bnum::BUint::from_digits(digits));
while i_m<m_bytes.len()&&i_d<N{
digits[i_d]=m_bytes[i_m];
i_m+=1;
i_d+=1;
}
/* AI idea is not bad
// Calculate i_m and i_d without branching
// is_less is 1 if lsb < bias + mantissa_bits (mantissa is "above" fixed point)
let is_less = (lsb < ($bias + $mantissa_bits)) as usize;
// If lsb < bias, we skip i_m bytes in m_bytes, i_d is 0
// If lsb >= bias, i_m is 0, we skip i_d bytes in digits
i_m = (((($bias + $mantissa_bits) - lsb) >> DIGIT_SHIFT) & is_less);
i_d = ((lsb.wrapping_sub($bias + $mantissa_bits) >> DIGIT_SHIFT) & !is_less);
// Calculate how many bytes to copy safely
let m_bytes_len = m_bytes.len();
let count = (m_bytes_len.saturating_sub(i_m)).min(N.saturating_sub(i_d));
if count > 0 {
digits[i_d..i_d + count].copy_from_slice(&m_bytes[i_m..i_m + count]);
}
*/
let bits=Int::from_bytes(digits);
Ok(if s{
Self::from_bits(bits.overflowing_neg().0)
}else{
@@ -291,14 +399,14 @@ macro_rules! impl_from_float {
}
}
}
impl_from_float!(integer_decode_f32,f32,24);
impl_from_float!(integer_decode_f64,f64,53);
impl_from_float!(integer_decode_f32,f32,24,127);
impl_from_float!(integer_decode_f64,f64,53,1023);
impl<const N:usize,const F:usize> core::fmt::Debug for Fixed<N,F>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
let integral=self.as_bits().unsigned_abs()>>F;
let fractional=self.as_bits().unsigned_abs()&((bnum::BUint::<N>::ONE<<F)-bnum::BUint::<N>::ONE);
let fractional=self.as_bits().unsigned_abs()&((n!(1)<<F)-n!(1));
let leading_zeroes=(fractional.leading_zeros() as usize).saturating_sub(N*BNUM_DIGIT_WIDTH-F)>>2;
if self.is_negative(){
core::write!(f,"-")?;
@@ -320,14 +428,14 @@ impl<const N:usize,const F:usize> core::fmt::Display for Fixed<N,F>{
}
macro_rules! impl_additive_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> $struct<N,F>{
( $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> Fixed<N,F>{
#[inline]
pub const fn $method(self, other: Self) -> Self {
Self::from_bits(self.bits.$method(other.bits))
}
}
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
impl<const N:usize,const F:usize> core::ops::$trait for Fixed<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
@@ -337,8 +445,8 @@ macro_rules! impl_additive_operator {
};
}
macro_rules! impl_additive_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
( $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait for Fixed<N,F>{
#[inline]
fn $method(&mut self, other: Self) {
self.bits.$method(other.bits);
@@ -348,28 +456,28 @@ macro_rules! impl_additive_assign_operator {
}
// Impl arithmetic pperators
impl_additive_assign_operator!( Fixed, AddAssign, add_assign );
impl_additive_operator!( Fixed, Add, add, Self );
impl_additive_assign_operator!( Fixed, SubAssign, sub_assign );
impl_additive_operator!( Fixed, Sub, sub, Self );
impl_additive_assign_operator!( Fixed, RemAssign, rem_assign );
impl_additive_operator!( Fixed, Rem, rem, Self );
impl_additive_assign_operator!( AddAssign, add_assign );
impl_additive_operator!( Add, add, Self );
impl_additive_assign_operator!( SubAssign, sub_assign );
impl_additive_operator!( Sub, sub, Self );
impl_additive_assign_operator!( RemAssign, rem_assign );
impl_additive_operator!( Rem, rem, Self );
// Impl bitwise operators
impl_additive_assign_operator!( Fixed, BitAndAssign, bitand_assign );
impl_additive_operator!( Fixed, BitAnd, bitand, Self );
impl_additive_assign_operator!( Fixed, BitOrAssign, bitor_assign );
impl_additive_operator!( Fixed, BitOr, bitor, Self );
impl_additive_assign_operator!( Fixed, BitXorAssign, bitxor_assign );
impl_additive_operator!( Fixed, BitXor, bitxor, Self );
impl_additive_assign_operator!( BitAndAssign, bitand_assign );
impl_additive_operator!( BitAnd, bitand, Self );
impl_additive_assign_operator!( BitOrAssign, bitor_assign );
impl_additive_operator!( BitOr, bitor, Self );
impl_additive_assign_operator!( BitXorAssign, bitxor_assign );
impl_additive_operator!( BitXor, bitxor, Self );
// non-wide operators. The result is the same width as the inputs.
// This macro is not used in the default configuration.
#[expect(unused_macros)]
macro_rules! impl_multiplicative_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
( ($trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for Fixed<{$width/BNUM_DIGIT_WIDTH},F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
@@ -381,8 +489,8 @@ macro_rules! impl_multiplicative_operator_not_const_generic {
};
}
macro_rules! impl_multiplicative_assign_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $non_assign_method: ident ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
( ($trait: ident, $method: ident, $non_assign_method: ident ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for Fixed<{$width/BNUM_DIGIT_WIDTH},F>{
#[inline]
fn $method(&mut self, other: Self) {
paste::item!{
@@ -394,13 +502,13 @@ macro_rules! impl_multiplicative_assign_operator_not_const_generic {
}
macro_rules! impl_multiply_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
( ($trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> Fixed<{$width/BNUM_DIGIT_WIDTH},F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self, rhs: Self) -> Self {
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
let out:Int::<{$width*2/BNUM_DIGIT_WIDTH}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Self::from_bits(out.shr(F as u32).as_())
}else{
@@ -410,34 +518,34 @@ macro_rules! impl_multiply_operator_not_const_generic {
}
}
#[cfg(not(feature="wide-mul"))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
impl_multiplicative_operator_not_const_generic!(($trait,$method,$output),$width);
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<i64> for Fixed<$width,{$width*32}>{
impl ratio_ops::ratio::Divide<i64> for Fixed<{$width/BNUM_DIGIT_WIDTH},{$width>>1}>{
type Output=Self;
#[inline]
fn divide(self, other: i64)->Self::Output{
Self::from_bits(self.bits.div_euclid(BInt::from(other)))
Self::from_bits(self.bits.div_euclid(other.as_()))
}
}
}
}
macro_rules! impl_divide_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
( ($trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> Fixed<{$width/BNUM_DIGIT_WIDTH},F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self,other:Self)->Self{
//this only needs to be $width+F as u32/64+1 but MUH CONST GENERICS!!!!!
let lhs=self.bits.as_::<BInt::<{$width*2}>>().shl(F as u32);
let rhs=other.bits.as_::<BInt::<{$width*2}>>();
let lhs=self.bits.as_::<Int::<{$width*2/BNUM_DIGIT_WIDTH}>>().shl(F as u32);
let rhs=other.bits.as_::<Int::<{$width*2/BNUM_DIGIT_WIDTH}>>();
Self::from_bits(lhs.div_euclid(rhs).as_())
}
}
}
#[cfg(all(not(feature="wide-mul"),not(feature="deferred-division")))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
impl_multiplicative_operator_not_const_generic!(($trait,$method,$output),$width);
#[cfg(all(not(feature="wide-mul"),feature="deferred-division"))]
impl<const F:usize> ratio_ops::ratio::Divide for $struct<$width,F>{
impl<const F:usize> ratio_ops::ratio::Divide for Fixed<{$width/BNUM_DIGIT_WIDTH},F>{
type Output = $output;
#[inline]
fn divide(self, other: Self) -> Self::Output {
@@ -450,28 +558,28 @@ macro_rules! impl_divide_operator_not_const_generic {
}
macro_rules! impl_multiplicative_operator {
( $struct: ident, $trait: ident, $method: ident, $inner_method: ident, $output: ty ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
( $trait: ident, $method: ident, $inner_method: ident, $output: ty ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for Fixed<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
Int::<N>:bnum::cast::CastFrom<U>+core::ops::$trait,
{
type Output = $output;
#[inline]
fn $method(self,other:U)->Self::Output{
Self::from_bits(self.bits.$inner_method(BInt::<N>::from(other)))
Self::from_bits(self.bits.$inner_method(other.as_()))
}
}
};
}
macro_rules! impl_multiplicative_assign_operator {
( $struct: ident, $trait: ident, $method: ident, $not_assign_method: ident ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
( $trait: ident, $method: ident, $not_assign_method: ident ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for Fixed<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
Int::<N>:bnum::cast::CastFrom<U>+core::ops::$trait,
{
#[inline]
fn $method(&mut self,other:U){
self.bits=self.bits.$not_assign_method(BInt::<N>::from(other));
self.bits=self.bits.$not_assign_method(other.as_());
}
}
};
@@ -491,18 +599,18 @@ macro_rules! macro_repeated{
macro_rules! macro_16 {
( $macro: ident, $any:tt ) => {
macro_repeated!($macro,$any,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
macro_repeated!($macro,$any,64,128,192,256,320,384,448,512,576,640,704,768,832,896,960,1024);
}
}
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, MulAssign, mul_assign, mul) );
macro_16!( impl_multiply_operator_not_const_generic, (Fixed, Mul, mul, Self) );
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, DivAssign, div_assign, div_euclid) );
macro_16!( impl_divide_operator_not_const_generic, (Fixed, Div, div_euclid, Self) );
impl_multiplicative_assign_operator!( Fixed, MulAssign, mul_assign, mul );
impl_multiplicative_operator!( Fixed, Mul, mul, mul, Self );
impl_multiplicative_assign_operator!( Fixed, DivAssign, div_assign, div_euclid );
impl_multiplicative_operator!( Fixed, Div, div, div_euclid, Self );
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (MulAssign, mul_assign, mul) );
macro_16!( impl_multiply_operator_not_const_generic, (Mul, mul, Self) );
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (DivAssign, div_assign, div_euclid) );
macro_16!( impl_divide_operator_not_const_generic, (Div, div_euclid, Self) );
impl_multiplicative_assign_operator!( MulAssign, mul_assign, mul );
impl_multiplicative_operator!( Mul, mul, mul, Self );
impl_multiplicative_assign_operator!( DivAssign, div_assign, div_euclid );
impl_multiplicative_operator!( Div, div, div_euclid, Self );
#[cfg(feature="deferred-division")]
impl<const LHS_N:usize,const LHS_F:usize,const RHS_N:usize,const RHS_F:usize> core::ops::Div<Fixed<RHS_N,RHS_F>> for Fixed<LHS_N,LHS_F>{
type Output=ratio_ops::ratio::Ratio<Fixed<LHS_N,LHS_F>,Fixed<RHS_N,RHS_F>>;
@@ -518,8 +626,8 @@ impl<const N:usize,const F:usize> ratio_ops::ratio::Parity for Fixed<N,F>{
}
}
macro_rules! impl_shift_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
( $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for Fixed<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: u32) -> Self::Output {
@@ -529,8 +637,8 @@ macro_rules! impl_shift_operator {
};
}
macro_rules! impl_shift_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
( $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for Fixed<N,F>{
#[inline]
fn $method(&mut self, other: u32) {
self.bits.$method(other);
@@ -538,40 +646,40 @@ macro_rules! impl_shift_assign_operator {
}
};
}
impl_shift_assign_operator!( Fixed, ShlAssign, shl_assign );
impl_shift_operator!( Fixed, Shl, shl, Self );
impl_shift_assign_operator!( Fixed, ShrAssign, shr_assign );
impl_shift_operator!( Fixed, Shr, shr, Self );
impl_shift_assign_operator!( ShlAssign, shl_assign );
impl_shift_operator!( Shl, shl, Self );
impl_shift_assign_operator!( ShrAssign, shr_assign );
impl_shift_operator!( Shr, shr, Self );
// wide operators. The result width is the sum of the input widths, i.e. none of the multiplication
#[allow(unused_macros)]
macro_rules! impl_wide_operators{
($lhs:expr,$rhs:expr)=>{
impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
impl core::ops::Mul<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
type Output=Fixed<{($lhs+$rhs)/BNUM_DIGIT_WIDTH},{($lhs+$rhs)>>1}>;
#[inline]
fn mul(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
fn mul(self, other: Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>)->Self::Output{
paste::item!{
self.[<wide_mul_ $lhs _ $rhs>](other)
}
}
}
#[cfg(not(feature="deferred-division"))]
impl core::ops::Div<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
impl core::ops::Div<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
type Output=Fixed<{($lhs+$rhs)/BNUM_DIGIT_WIDTH},{($lhs+$rhs)>>1}>;
#[inline]
fn div(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
fn div(self, other: Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
impl ratio_ops::ratio::Divide<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
type Output=Fixed<{($lhs+$rhs)/BNUM_DIGIT_WIDTH},{($lhs+$rhs)>>1}>;
#[inline]
fn divide(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
fn divide(self, other: Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
@@ -588,23 +696,23 @@ macro_rules! impl_wide_not_const_generic{
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
impl Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>();
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
pub fn [<wide_mul_ $lhs _ $rhs>](self,rhs:Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>)->Fixed<{($lhs+$rhs)/BNUM_DIGIT_WIDTH},{($lhs+$rhs)>>1}>{
let lhs=self.bits.as_::<Int<{($lhs+$rhs)/BNUM_DIGIT_WIDTH}>>();
let rhs=rhs.bits.as_::<Int<{($lhs+$rhs)/BNUM_DIGIT_WIDTH}>>();
Fixed::from_bits(lhs*rhs)
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
pub fn [<wide_div_ $lhs _ $rhs>](self,rhs:Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>)->Fixed<{($lhs+$rhs)/BNUM_DIGIT_WIDTH},{($lhs+$rhs)>>1}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>().shl($rhs*64);
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
let lhs=self.bits.as_::<Int<{($lhs+$rhs)/BNUM_DIGIT_WIDTH}>>().shl($rhs);
let rhs=rhs.bits.as_::<Int<{($lhs+$rhs)/BNUM_DIGIT_WIDTH}>>();
Fixed::from_bits(lhs.div_euclid(rhs))
}
}
@@ -618,13 +726,13 @@ macro_rules! impl_wide_same_size_not_const_generic{
(),
$width:expr
)=>{
impl Fixed<$width,{$width*32}>
impl Fixed<{$width/BNUM_DIGIT_WIDTH},{$width>>1}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<{$width/BNUM_DIGIT_WIDTH},{$width>>1}>)->Fixed<{$width*2/BNUM_DIGIT_WIDTH},{$width*2>>1}>{
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
let out:Int::<{$width*2/BNUM_DIGIT_WIDTH}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Fixed::from_bits(out)
}else{
@@ -637,10 +745,10 @@ macro_rules! impl_wide_same_size_not_const_generic{
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<{$width/BNUM_DIGIT_WIDTH},{$width>>1}>)->Fixed<{$width*2/BNUM_DIGIT_WIDTH},{$width*2>>1}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$width*2}>>().shl($width*64);
let rhs=rhs.bits.as_::<BInt<{$width*2}>>();
let lhs=self.bits.as_::<Int<{$width*2/BNUM_DIGIT_WIDTH}>>().shl($width);
let rhs=rhs.bits.as_::<Int<{$width*2/BNUM_DIGIT_WIDTH}>>();
Fixed::from_bits(lhs.div_euclid(rhs))
}
}
@@ -653,25 +761,25 @@ macro_rules! impl_wide_same_size_not_const_generic{
//const generics sidestepped wahoo
macro_repeated!(
impl_wide_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
(1,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
(1,3),(2,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
(1,4),(2,4),(3,4), (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
(1,5),(2,5),(3,5),(4,5), (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
(1,6),(2,6),(3,6),(4,6),(5,6), (7,6),(8,6),(9,6),(10,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7), (8,7),(9,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8), (9,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),
(1,11),(2,11),(3,11),(4,11),(5,11),
(1,12),(2,12),(3,12),(4,12),
(1,13),(2,13),(3,13),
(1,14),(2,14),
(1,15)
(128,64),(192,64),(256,64),(320,64),(384,64),(448,64),(512,64),(576,64),(640,64),(704,64),(768,64),(832,64),(896,64),(960,64),
(64,128), (192,128),(256,128),(320,128),(384,128),(448,128),(512,128),(576,128),(640,128),(704,128),(768,128),(832,128),(896,128),
(64,192),(128,192), (256,192),(320,192),(384,192),(448,192),(512,192),(576,192),(640,192),(704,192),(768,192),(832,192),
(64,256),(128,256),(192,256), (320,256),(384,256),(448,256),(512,256),(576,256),(640,256),(704,256),(768,256),
(64,320),(128,320),(192,320),(256,320), (384,320),(448,320),(512,320),(576,320),(640,320),(704,320),
(64,384),(128,384),(192,384),(256,384),(320,384), (448,384),(512,384),(576,384),(640,384),
(64,448),(128,448),(192,448),(256,448),(320,448),(384,448), (512,448),(576,448),
(64,512),(128,512),(192,512),(256,512),(320,512),(384,512),(448,512), (576,512),
(64,576),(128,576),(192,576),(256,576),(320,576),(384,576),(448,576),
(64,640),(128,640),(192,640),(256,640),(320,640),(384,640),
(64,704),(128,704),(192,704),(256,704),(320,704),
(64,768),(128,768),(192,768),(256,768),
(64,832),(128,832),(192,832),
(64,896),(128,896),
(64,960)
);
macro_repeated!(
impl_wide_same_size_not_const_generic,(),
1,2,3,4,5,6,7,8
64,128,192,256,320,384,448,512
);
#[derive(Debug,Eq,PartialEq)]
@@ -702,43 +810,43 @@ macro_rules! impl_narrow_not_const_generic{
($lhs:expr,$rhs:expr)
)=>{
paste::item!{
impl Fixed<$lhs,{$lhs*32}>
impl Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>
{
#[inline]
pub fn [<wrap_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
pub fn [<wrap_ $rhs>](self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
Fixed::from_bits(bnum::cast::As::as_::<Int::<{$rhs/BNUM_DIGIT_WIDTH}>>(self.bits.shr(($lhs-$rhs)>>1)))
}
#[inline]
pub fn [<narrow_ $rhs>](self)->Result<Fixed<$rhs,{$rhs*32}>,NarrowError>{
if Fixed::<$rhs,{$rhs*32}>::MAX.[<widen_ $lhs>]().bits<self.bits{
pub fn [<narrow_ $rhs>](self)->Result<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>,NarrowError>{
if Fixed::<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>::MAX.[<widen_ $lhs>]().bits<self.bits{
return Err(NarrowError::Overflow);
}
if self.bits<Fixed::<$rhs,{$rhs*32}>::MIN.[<widen_ $lhs>]().bits{
if self.bits<Fixed::<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>::MIN.[<widen_ $lhs>]().bits{
return Err(NarrowError::Underflow);
}
Ok(self.[<wrap_ $rhs>]())
}
#[inline]
pub fn [<clamp_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
pub fn [<clamp_ $rhs>](self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
self.[<narrow_ $rhs>]().clamp()
}
}
impl Wrap<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
impl Wrap<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
#[inline]
fn wrap(self)->Fixed<$rhs,{$rhs*32}>{
fn wrap(self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
self.[<wrap_ $rhs>]()
}
}
impl TryInto<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
impl TryInto<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
type Error=NarrowError;
#[inline]
fn try_into(self)->Result<Fixed<$rhs,{$rhs*32}>,Self::Error>{
fn try_into(self)->Result<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>,Self::Error>{
self.[<narrow_ $rhs>]()
}
}
impl Clamp<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
impl Clamp<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
#[inline]
fn clamp(self)->Fixed<$rhs,{$rhs*32}>{
fn clamp(self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
self.[<clamp_ $rhs>]()
}
}
@@ -751,16 +859,16 @@ macro_rules! impl_widen_not_const_generic{
($lhs:expr,$rhs:expr)
)=>{
paste::item!{
impl Fixed<$lhs,{$lhs*32}>
impl Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>
{
#[inline]
pub fn [<widen_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
pub fn [<widen_ $rhs>](self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
Fixed::from_bits(bnum::cast::As::as_::<Int::<{$rhs/BNUM_DIGIT_WIDTH}>>(self.bits).shl(($rhs-$lhs)>>1))
}
}
impl Into<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
impl Into<Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>> for Fixed<{$lhs/BNUM_DIGIT_WIDTH},{$lhs>>1}>{
#[inline]
fn into(self)->Fixed<$rhs,{$rhs*32}>{
fn into(self)->Fixed<{$rhs/BNUM_DIGIT_WIDTH},{$rhs>>1}>{
self.[<widen_ $rhs>]()
}
}
@@ -772,45 +880,45 @@ macro_rules! impl_widen_not_const_generic{
macro_repeated!(
impl_narrow_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13),
(15,14),(16,14),
(16,15)
(128,64),(192,64),(256,64),(320,64),(384,64),(448,64),(512,64),(576,64),(640,64),(704,64),(768,64),(832,64),(896,64),(960,64),(1024,64),(1088,64),
(192,128),(256,128),(320,128),(384,128),(448,128),(512,128),(576,128),(640,128),(704,128),(768,128),(832,128),(896,128),(960,128),(1024,128),
(256,192),(320,192),(384,192),(448,192),(512,192),(576,192),(640,192),(704,192),(768,192),(832,192),(896,192),(960,192),(1024,192),
(320,256),(384,256),(448,256),(512,256),(576,256),(640,256),(704,256),(768,256),(832,256),(896,256),(960,256),(1024,256),
(384,320),(448,320),(512,320),(576,320),(640,320),(704,320),(768,320),(832,320),(896,320),(960,320),(1024,320),
(448,384),(512,384),(576,384),(640,384),(704,384),(768,384),(832,384),(896,384),(960,384),(1024,384),
(512,448),(576,448),(640,448),(704,448),(768,448),(832,448),(896,448),(960,448),(1024,448),
(576,512),(640,512),(704,512),(768,512),(832,512),(896,512),(960,512),(1024,512),
(640,576),(704,576),(768,576),(832,576),(896,576),(960,576),(1024,576),
(704,640),(768,640),(832,640),(896,640),(960,640),(1024,640),
(768,704),(832,704),(896,704),(960,704),(1024,704),
(832,768),(896,768),(960,768),(1024,768),
(896,832),(960,832),(1024,832),
(960,896),(1024,896),
(1024,960)
);
macro_repeated!(
impl_widen_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),
(1,17)
(64,128),
(64,192),(128,192),
(64,256),(128,256),(192,256),
(64,320),(128,320),(192,320),(256,320),
(64,384),(128,384),(192,384),(256,384),(320,384),
(64,448),(128,448),(192,448),(256,448),(320,448),(384,448),
(64,512),(128,512),(192,512),(256,512),(320,512),(384,512),(448,512),
(64,576),(128,576),(192,576),(256,576),(320,576),(384,576),(448,576),(512,576),
(64,640),(128,640),(192,640),(256,640),(320,640),(384,640),(448,640),(512,640),(576,640),
(64,704),(128,704),(192,704),(256,704),(320,704),(384,704),(448,704),(512,704),(576,704),(640,704),
(64,768),(128,768),(192,768),(256,768),(320,768),(384,768),(448,768),(512,768),(576,768),(640,768),(704,768),
(64,832),(128,832),(192,832),(256,832),(320,832),(384,832),(448,832),(512,832),(576,832),(640,832),(704,832),(768,832),
(64,896),(128,896),(192,896),(256,896),(320,896),(384,896),(448,896),(512,896),(576,896),(640,896),(704,896),(768,896),(832,896),
(64,960),(128,960),(192,960),(256,960),(320,960),(384,960),(448,960),(512,960),(576,960),(640,960),(704,960),(768,960),(832,960),(896,960),
(64,1024),(128,1024),(192,1024),(256,1024),(320,1024),(384,1024),(448,1024),(512,1024),(576,1024),(640,1024),(704,1024),(768,1024),(832,1024),(896,1024),(960,1024),
(64,1088)
);
macro_rules! impl_not_const_generic{
($n:expr,$_2n:expr)=>{
impl Fixed<$n,{$n*32}>{
impl Fixed<{$n/BNUM_DIGIT_WIDTH},{$n>>1}>{
paste::item!{
#[inline]
pub fn sqrt_unchecked(self)->Self{
@@ -820,18 +928,18 @@ macro_rules! impl_not_const_generic{
//2. divide by 2 via >>1 (sqrt-ish)
//3. add on fractional offset
//Voila
let used_bits=self.bits.bits() as i32-1-($n*32) as i32;
let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
let used_bits=self.bits.unsigned_abs().bit_width() as i32-1-($n>>1) as i32;
let max_shift=((used_bits>>1)+($n>>1) as i32) as u32;
let mut result=Self::ZERO;
//resize self to match the wide mul output
let wide_self=self.[<widen_ $_2n>]();
//descend down the bits and check if flipping each bit would push the square over the input value
for shift in (0..=max_shift).rev(){
result.as_bits_mut().as_bits_mut().set_bit(shift,true);
result.as_bits_mut().set_bit(shift,true);
if wide_self<result.[<wide_mul_ $n _ $n>](result){
// put it back lol
result.as_bits_mut().as_bits_mut().set_bit(shift,false);
result.as_bits_mut().set_bit(shift,false);
}
}
result
@@ -856,11 +964,11 @@ macro_rules! impl_not_const_generic{
}
}
}
impl_not_const_generic!(1,2);
impl_not_const_generic!(2,4);
impl_not_const_generic!(3,6);
impl_not_const_generic!(4,8);
impl_not_const_generic!(5,10);
impl_not_const_generic!(6,12);
impl_not_const_generic!(7,14);
impl_not_const_generic!(8,16);
impl_not_const_generic!(64,128);
impl_not_const_generic!(128,256);
impl_not_const_generic!(192,384);
impl_not_const_generic!(256,512);
impl_not_const_generic!(320,640);
impl_not_const_generic!(384,768);
impl_not_const_generic!(448,896);
impl_not_const_generic!(512,1024);

View File

@@ -1,208 +1,273 @@
use crate::types::I32F32;
use crate::types::I256F256;
use crate::fixed::Fixed;
use crate::types::{F64_32,F128_64,F192_96,F512_256};
#[test]
fn you_can_add_numbers(){
let a=I256F256::from((3i128*2).pow(4));
assert_eq!(a+a,I256F256::from((3i128*2).pow(4)*2));
let a=F512_256::from((3i128*2).pow(4));
assert_eq!(a+a,F512_256::from((3i128*2).pow(4)*2));
}
macro_rules! test_bit_by_bit{
($n:expr,$float:ty,$mantissa_bits:expr)=>{{
const MANT:u64=(1<<$mantissa_bits)-1;
// all bits in range
for i in 0..$n-$mantissa_bits{
let a=Fixed::<{$n/8},{$n>>1}>::from_bits(bnum::cast::As::as_::<bnum::Int::<{$n/8}>>(MANT).shl(i));
let b=(MANT as $float)*(2.0 as $float).powi(i as i32-{$n>>1});
let f:$float=a.into();
assert_eq!(f,b,"F{}_{} Into float {i}",$n,$n>>1);
assert_eq!(a,b.try_into().unwrap(),"F{}_{} From float {i}",$n,$n>>1);
}
// underflow
for i in 0u32..$mantissa_bits{
let a=Fixed::<{$n/8},{$n>>1}>::from_bits(bnum::cast::As::as_::<bnum::Int::<{$n/8}>>(MANT>>i));
let b=((MANT>>i) as $float)*(2.0 as $float).powi(-{$n>>1});
let f:$float=a.into();
assert_eq!(f,b,"Underflow F{}_{} Into float {i}",$n,$n>>1);
assert_eq!(a,b.try_into().unwrap(),"Underflow F{}_{} From float {i}",$n,$n>>1);
}
}};
}
#[test]
fn test_many(){
test_bit_by_bit!(64,f32,24);
test_bit_by_bit!(128,f32,24);
// f32 is reaching its limits here
// test_bit_by_bit!(256,f32,24);
// test_bit_by_bit!(512,f32,24);
test_bit_by_bit!(64,f64,53);
test_bit_by_bit!(128,f64,53);
test_bit_by_bit!(256,f64,53);
test_bit_by_bit!(512,f64,53);
}
#[test]
fn to_f32(){
let a=I256F256::from(1)>>2;
let a=F64_32::ZERO;
let f:f32=a.into();
assert_eq!(f,0.0f32);
let a=F64_32::from(1)>>2;
let f:f32=a.into();
assert_eq!(f,0.25f32);
let f:f32=(-a).into();
assert_eq!(f,-0.25f32);
let a=I256F256::from(0);
let a=F64_32::MIN;
let f:f32=a.into();
assert_eq!(f,i32::MIN as f32);
let a=F512_256::from(1)>>2;
let f:f32=a.into();
assert_eq!(f,0.25f32);
let f:f32=(-a).into();
assert_eq!(f,-0.25f32);
let a=F512_256::from(0);
let f:f32=(-a).into();
assert_eq!(f,0f32);
let a=I256F256::from(237946589723468975i64)<<16;
let a=F512_256::from(237946589723468975i64)<<16;
let f:f32=a.into();
assert_eq!(f,237946589723468975f32*2.0f32.powi(16));
}
#[test]
fn to_f64(){
let a=I256F256::from(1)>>2;
let a=F64_32::ZERO;
let f:f64=a.into();
assert_eq!(f,0.0f64);
let a=F64_32::from(1)>>2;
let f:f64=a.into();
assert_eq!(f,0.25f64);
let f:f64=(-a).into();
assert_eq!(f,-0.25f64);
let a=I256F256::from(0);
let a=F64_32::MIN;
let f:f64=a.into();
assert_eq!(f,i32::MIN as f64);
let a=F512_256::from(1)>>2;
let f:f64=a.into();
assert_eq!(f,0.25f64);
let f:f64=(-a).into();
assert_eq!(f,-0.25f64);
let a=F512_256::from(0);
let f:f64=(-a).into();
assert_eq!(f,0f64);
let a=I256F256::from(237946589723468975i64)<<16;
let a=F512_256::from(237946589723468975i64)<<16;
let f:f64=a.into();
assert_eq!(f,237946589723468975f64*2.0f64.powi(16));
}
#[test]
fn from_f32(){
let a=I256F256::from(1)>>2;
let b:Result<I256F256,_>=0.25f32.try_into();
let a=F64_32::ZERO;
let b:Result<F64_32,_>=0.0f32.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(-1)>>2;
let b:Result<I256F256,_>=(-0.25f32).try_into();
let a=F512_256::from(1)>>2;
let b:Result<F512_256,_>=0.25f32.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0);
let b:Result<I256F256,_>=0.try_into();
let a=F512_256::from(-1)>>2;
let b:Result<F512_256,_>=(-0.25f32).try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f32*2.0f32.powi(16)).try_into();
let a=F512_256::from(0);
let b:Result<F512_256,_>=0.try_into();
assert_eq!(b,Ok(a));
let a=F512_256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<F512_256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f32*2.0f32.powi(16)).try_into();
assert_eq!(b,Ok(a));
//I32F32::MAX into f32 is truncated into this value
let a=I32F32::raw(0b111111111111111111111111000000000000000000000000000000000000000i64);
let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MAX).try_into();
let a=F64_32::raw(0b111111111111111111111111000000000000000000000000000000000000000i64);
let b:Result<F64_32,_>=Into::<f32>::into(F64_32::MAX).try_into();
assert_eq!(b,Ok(a));
//I32F32::MIN hits a special case since it's not representable as a positive signed integer
//TODO: don't return an overflow because this is technically possible
let _a=I32F32::MIN;
let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into();
let a=F64_32::MIN;
let f:f32=a.into();
let b:Result<F64_32,_>=f.try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
//16 is within the 24 bits of float precision
let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.widen_2()).try_into();
let a=-F64_32::MIN.widen_128();
let f:f32=a.into();
let b:Result<F64_32,_>=f.try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into();
let b:Result<F64_32,_>=f32::MIN_POSITIVE.try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow));
//test many cases
for i in 0..64{
let a=crate::fixed::Fixed::<2,64>::raw_digit(0b111111111111111111111111000000000000000000000000000000000000000i64)<<i;
let a=F128_64::from_u64(0b111111111111111111111111000000000000000000000000000000000000000u64)<<i;
let f:f32=a.into();
let b:Result<crate::fixed::Fixed<2,64>,_>=f.try_into();
let b:Result<F128_64,_>=f.try_into();
assert_eq!(b,Ok(a));
}
}
#[test]
fn from_f64(){
let a=I256F256::from(1)>>2;
let b:Result<I256F256,_>=0.25f64.try_into();
let a=F64_32::ZERO;
let b:Result<F64_32,_>=0.0f64.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(-1)>>2;
let b:Result<I256F256,_>=(-0.25f64).try_into();
let a=F512_256::from(1)>>2;
let b:Result<F512_256,_>=0.25f64.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0);
let b:Result<I256F256,_>=0.try_into();
let a=F512_256::from(-1)>>2;
let b:Result<F512_256,_>=(-0.25f64).try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f64*2.0f64.powi(16)).try_into();
let a=F512_256::from(0);
let b:Result<F512_256,_>=0.try_into();
assert_eq!(b,Ok(a));
let a=F512_256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<F512_256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f64*2.0f64.powi(16)).try_into();
assert_eq!(b,Ok(a));
}
#[test]
fn you_can_shr_numbers(){
let a=I32F32::from(4);
assert_eq!(a>>1,I32F32::from(2));
let a=F64_32::from(4);
assert_eq!(a>>1,F64_32::from(2));
}
#[test]
fn test_wide_mul(){
let a=I32F32::ONE;
let aa=a.wide_mul_1_1(a);
assert_eq!(aa,crate::types::I64F64::ONE);
let a=F64_32::ONE;
let aa=a.wide_mul_64_64(a);
assert_eq!(aa,F128_64::ONE);
}
#[test]
fn test_wide_div(){
let a=I32F32::ONE*4;
let b=I32F32::ONE*2;
let wide_a=a.wide_mul_1_1(I32F32::ONE);
let wide_b=b.wide_mul_1_1(I32F32::ONE);
let ab=a.wide_div_1_1(b);
assert_eq!(ab,crate::types::I64F64::ONE*2);
let wab=wide_a.wide_div_2_1(b);
assert_eq!(wab,crate::fixed::Fixed::<3,96>::ONE*2);
let awb=a.wide_div_1_2(wide_b);
assert_eq!(awb,crate::fixed::Fixed::<3,96>::ONE*2);
let a=F64_32::ONE*4;
let b=F64_32::ONE*2;
let wide_a=a.wide_mul_64_64(F64_32::ONE);
let wide_b=b.wide_mul_64_64(F64_32::ONE);
let ab=a.wide_div_64_64(b);
assert_eq!(ab,F128_64::ONE*2);
let wab=wide_a.wide_div_128_64(b);
assert_eq!(wab,F192_96::ONE*2);
let awb=a.wide_div_64_128(wide_b);
assert_eq!(awb,F192_96::ONE*2);
}
#[test]
fn test_wide_mul_repeated() {
let a=I32F32::from(2);
let b=I32F32::from(3);
let a=F64_32::from(2);
let b=F64_32::from(3);
let w1=a.wide_mul_1_1(b);
let w2=w1.wide_mul_2_2(w1);
let w3=w2.wide_mul_4_4(w2);
let w1=a.wide_mul_64_64(b);
let w2=w1.wide_mul_128_128(w1);
let w3=w2.wide_mul_256_256(w2);
assert_eq!(w3,I256F256::from((3i128*2).pow(4)));
assert_eq!(w3,F512_256::from((3i128*2).pow(4)));
}
#[test]
fn test_bint(){
let a=I32F32::ONE;
assert_eq!(a*2,I32F32::from(2));
let a=F64_32::ONE;
assert_eq!(a*2,F64_32::from(2));
}
#[test]
fn test_wrap(){
assert_eq!(I32F32::ONE,I256F256::ONE.wrap_1());
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.wrap_1());
assert_eq!(F64_32::ONE,F512_256::ONE.wrap_64());
assert_eq!(F64_32::NEG_ONE,F512_256::NEG_ONE.wrap_64());
}
#[test]
fn test_narrow(){
assert_eq!(Ok(I32F32::ONE),I256F256::ONE.narrow_1());
assert_eq!(Ok(I32F32::NEG_ONE),I256F256::NEG_ONE.narrow_1());
assert_eq!(Ok(F64_32::ONE),F512_256::ONE.narrow_64());
assert_eq!(Ok(F64_32::NEG_ONE),F512_256::NEG_ONE.narrow_64());
}
#[test]
fn test_widen(){
assert_eq!(I32F32::ONE.widen_8(),I256F256::ONE);
assert_eq!(I32F32::NEG_ONE.widen_8(),I256F256::NEG_ONE);
assert_eq!(F64_32::ONE.widen_512(),F512_256::ONE);
assert_eq!(F64_32::NEG_ONE.widen_512(),F512_256::NEG_ONE);
}
#[test]
fn test_clamp(){
assert_eq!(I32F32::ONE,I256F256::ONE.clamp_1());
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.clamp_1());
assert_eq!(F64_32::ONE,F512_256::ONE.clamp_64());
assert_eq!(F64_32::NEG_ONE,F512_256::NEG_ONE.clamp_64());
}
#[test]
fn test_sqrt(){
let a=I32F32::ONE*4;
assert_eq!(a.sqrt(),I32F32::from(2));
let a=F64_32::ONE*4;
assert_eq!(a.sqrt(),F64_32::from(2));
}
#[test]
fn test_sqrt_zero(){
let a=I32F32::ZERO;
assert_eq!(a.sqrt(),I32F32::ZERO);
let a=F64_32::ZERO;
assert_eq!(a.sqrt(),F64_32::ZERO);
}
#[test]
fn test_sqrt_low(){
let a=I32F32::HALF;
let a=F64_32::HALF;
let b=a.fixed_mul(a);
assert_eq!(b.sqrt(),a);
}
fn find_equiv_sqrt_via_f64(n:I32F32)->I32F32{
fn find_equiv_sqrt_via_f64(n:F64_32)->F64_32{
//GIMME THEM BITS BOY
let &[bits]=n.to_bits().to_bits().digits();
let ibits=bits as i64;
let ibits=i64::from_le_bytes(n.to_bits().to_bytes());
let f=(ibits as f64)/((1u64<<32) as f64);
let f_ans=f.sqrt();
let i=(f_ans*((1u64<<32) as f64)) as i64;
let r=I32F32::from_bits(bnum::BInt::<1>::from(i));
let i=(f_ans*((1u64<<32) as f64)) as u64;
let r=F64_32::from_u64(i);
//mimic the behaviour of the algorithm,
//return the result if it truncates to the exact answer
if (r+I32F32::EPSILON).wide_mul_1_1(r+I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
return r+I32F32::EPSILON;
if (r+F64_32::EPSILON).wide_mul_64_64(r+F64_32::EPSILON)==n.wide_mul_64_64(F64_32::ONE){
return r+F64_32::EPSILON;
}
if (r-I32F32::EPSILON).wide_mul_1_1(r-I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
return r-I32F32::EPSILON;
if (r-F64_32::EPSILON).wide_mul_64_64(r-F64_32::EPSILON)==n.wide_mul_64_64(F64_32::ONE){
return r-F64_32::EPSILON;
}
return r;
}
fn test_exact(n:I32F32){
fn test_exact(n:F64_32){
assert_eq!(n.sqrt(),find_equiv_sqrt_via_f64(n));
}
#[test]
fn test_sqrt_exact(){
//43
for i in 0..((i64::MAX as f32).ln() as u32){
let n=I32F32::from_bits(bnum::BInt::<1>::from((i as f32).exp() as i64));
let n=F64_32::from_u64((i as f32).exp() as u64);
test_exact(n);
}
}
#[test]
fn test_sqrt_max(){
let a=I32F32::MAX;
let a=F64_32::MAX;
test_exact(a);
}
#[test]
@@ -210,9 +275,9 @@ fn test_sqrt_max(){
fn test_zeroes_normal(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
let zeroes=F64_32::zeroes2(F64_32::NEG_ONE,F64_32::ZERO,F64_32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE,I32F32::ONE]));
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE*3,I32F32::ONE*2,I32F32::ONE);
let zeroes=F64_32::zeroes2(F64_32::NEG_ONE*3,F64_32::ONE*2,F64_32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE*3,I32F32::ONE]));
}
#[test]
@@ -220,25 +285,25 @@ fn test_zeroes_normal(){
fn test_zeroes_deferred_division(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
let zeroes=F64_32::zeroes2(F64_32::NEG_ONE,F64_32::ZERO,F64_32::ONE);
assert_eq!(
zeroes,
arrayvec::ArrayVec::from_iter([
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::NEG_ONE*2),
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::ONE*2),
ratio_ops::ratio::Ratio::new(F64_32::ONE*2,F64_32::NEG_ONE*2),
ratio_ops::ratio::Ratio::new(F64_32::ONE*2,F64_32::ONE*2),
])
);
}
#[test]
fn test_debug(){
assert_eq!(format!("{:?}",I32F32::EPSILON),"0.00000001");
assert_eq!(format!("{:?}",I32F32::ONE),"1.00000000");
assert_eq!(format!("{:?}",I32F32::TWO),"2.00000000");
assert_eq!(format!("{:?}",I32F32::MAX),"7fffffff.ffffffff");
assert_eq!(format!("{:?}",I32F32::try_from(core::f64::consts::PI).unwrap()),"3.243f6a88");
assert_eq!(format!("{:?}",I32F32::NEG_EPSILON),"-0.00000001");
assert_eq!(format!("{:?}",I32F32::NEG_ONE),"-1.00000000");
assert_eq!(format!("{:?}",I32F32::NEG_TWO),"-2.00000000");
assert_eq!(format!("{:?}",I32F32::MIN),"-80000000.00000000");
assert_eq!(format!("{:?}",F64_32::EPSILON),"0.00000001");
assert_eq!(format!("{:?}",F64_32::ONE),"1.00000000");
assert_eq!(format!("{:?}",F64_32::TWO),"2.00000000");
assert_eq!(format!("{:?}",F64_32::MAX),"7fffffff.ffffffff");
assert_eq!(format!("{:?}",F64_32::try_from(core::f64::consts::PI).unwrap()),"3.243f6a88");
assert_eq!(format!("{:?}",F64_32::NEG_EPSILON),"-0.00000001");
assert_eq!(format!("{:?}",F64_32::NEG_ONE),"-1.00000000");
assert_eq!(format!("{:?}",F64_32::NEG_TWO),"-2.00000000");
assert_eq!(format!("{:?}",F64_32::MIN),"-80000000.00000000");
}

View File

@@ -1,4 +1,7 @@
pub type I32F32=crate::fixed::Fixed<1,32>;
pub type I64F64=crate::fixed::Fixed<2,64>;
pub type I128F128=crate::fixed::Fixed<4,128>;
pub type I256F256=crate::fixed::Fixed<8,256>;
use crate::fixed::BNUM_DIGIT_WIDTH;
pub type F64_32=crate::fixed::Fixed<{64/BNUM_DIGIT_WIDTH},32>;
pub type F128_64=crate::fixed::Fixed<{128/BNUM_DIGIT_WIDTH},64>;
pub type F192_96=crate::fixed::Fixed<{192/BNUM_DIGIT_WIDTH},96>;
pub type F256_128=crate::fixed::Fixed<{256/BNUM_DIGIT_WIDTH},128>;
pub type F320_160=crate::fixed::Fixed<{320/BNUM_DIGIT_WIDTH},160>;
pub type F512_256=crate::fixed::Fixed<{512/BNUM_DIGIT_WIDTH},256>;

View File

@@ -1,18 +1,19 @@
use crate::fixed::Fixed;
use crate::fixed::BNUM_DIGIT_WIDTH;
use arrayvec::ArrayVec;
use std::cmp::Ordering;
macro_rules! impl_zeroes{
($n:expr)=>{
impl Fixed<$n,{$n*32}>{
impl Fixed<{$n/BNUM_DIGIT_WIDTH},{$n>>1}>{
#[inline]
pub fn zeroes2(a0:Self,a1:Self,a2:Self)->ArrayVec<<Self as core::ops::Div>::Output,2>{
let a2pos=match a2.cmp(&Self::ZERO){
Ordering::Greater=>true,
Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1).into_iter()),
Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1)),
Ordering::Less=>false,
};
let radicand=a1*a1-a2*a0*4;
let radicand=a1*a1-((a2*a0)<<2);
match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
Ordering::Greater=>{
// using wrap because sqrt always halves the number of leading digits.
@@ -21,21 +22,21 @@ macro_rules! impl_zeroes{
let planar_radicand=radicand.sqrt().[<wrap_ $n>]();
}
//sort roots ascending and avoid taking the difference of large numbers
let zeroes=match (a2pos,Self::ZERO<a1){
(true, true )=>[(-a1-planar_radicand)/(a2*2),(a0*2)/(-a1-planar_radicand)],
(true, false)=>[(a0*2)/(-a1+planar_radicand),(-a1+planar_radicand)/(a2*2)],
(false,true )=>[(a0*2)/(-a1-planar_radicand),(-a1-planar_radicand)/(a2*2)],
(false,false)=>[(-a1+planar_radicand)/(a2*2),(a0*2)/(-a1+planar_radicand)],
let zeroes=match (a2pos,a1.is_positive()){
(true, true )=>[(-a1-planar_radicand)/(a2<<1),(a0<<1)/(-a1-planar_radicand)],
(true, false)=>[(a0<<1)/(-a1+planar_radicand),(-a1+planar_radicand)/(a2<<1)],
(false,true )=>[(a0<<1)/(-a1-planar_radicand),(-a1-planar_radicand)/(a2<<1)],
(false,false)=>[(-a1+planar_radicand)/(a2<<1),(a0<<1)/(-a1+planar_radicand)],
};
ArrayVec::from_iter(zeroes)
},
Ordering::Equal=>ArrayVec::from_iter([(a1)/(a2*-2)]),
Ordering::Equal=>ArrayVec::from_iter([(a1)/(-a2<<1)]),
Ordering::Less=>ArrayVec::new_const(),
}
}
#[inline]
pub fn zeroes1(a0:Self,a1:Self)->ArrayVec<<Self as core::ops::Div>::Output,1>{
if a1==Self::ZERO{
if a1.is_zero(){
ArrayVec::new_const()
}else{
ArrayVec::from_iter([(-a0)/(a1)])
@@ -44,10 +45,10 @@ macro_rules! impl_zeroes{
}
};
}
impl_zeroes!(1);
impl_zeroes!(2);
impl_zeroes!(3);
impl_zeroes!(4);
impl_zeroes!(64);
impl_zeroes!(128);
impl_zeroes!(192);
impl_zeroes!(256);
//sqrt doubles twice!
//impl_zeroes!(5);
//impl_zeroes!(6);

View File

@@ -5,17 +5,17 @@ macro_rules! impl_fixed_wide_vector_not_const_generic {
(),
$n:expr
) => {
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$n,{$n*32}>>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<{$n>>3},{$n>>1}>>{
#[inline]
pub fn length(self)-><fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output{
pub fn length(self)-><fixed_wide::fixed::Fixed::<{$n>>3},{$n>>1}> as core::ops::Mul>::Output{
self.length_squared().sqrt_unchecked()
}
#[inline]
pub fn with_length<U,V>(self,length:U)-><Vector<N,V> as core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>>::Output
pub fn with_length<U,V>(self,length:U)-><Vector<N,V> as core::ops::Div<<fixed_wide::fixed::Fixed::<{$n>>3},{$n>>1}> as core::ops::Mul>::Output>>::Output
where
fixed_wide::fixed::Fixed<$n,{$n*32}>:core::ops::Mul<U,Output=V>,
fixed_wide::fixed::Fixed<{$n>>3},{$n>>1}>:core::ops::Mul<U,Output=V>,
U:Copy,
V:core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>,
V:core::ops::Div<<fixed_wide::fixed::Fixed::<{$n>>3},{$n>>1}> as core::ops::Mul>::Output>,
{
self*length/self.length()
}
@@ -27,7 +27,7 @@ macro_rules! impl_fixed_wide_vector_not_const_generic {
#[macro_export(local_inner_macros)]
macro_rules! macro_4 {
( $macro: ident, $any:tt ) => {
$crate::macro_repeated!($macro,$any,1,2,3,4);
$crate::macro_repeated!($macro,$any,64,128,192,256);
}
}
@@ -39,40 +39,40 @@ macro_rules! impl_fixed_wide_vector {
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
$crate::macro_repeated!(
impl_narrow_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13),
(15,14),(16,14),
(16,15)
(128,64),(192,64),(256,64),(320,64),(384,64),(448,64),(512,64),(576,64),(640,64),(704,64),(768,64),(832,64),(896,64),(960,64),(1024,64),(1088,64),
(192,128),(256,128),(320,128),(384,128),(448,128),(512,128),(576,128),(640,128),(704,128),(768,128),(832,128),(896,128),(960,128),(1024,128),
(256,192),(320,192),(384,192),(448,192),(512,192),(576,192),(640,192),(704,192),(768,192),(832,192),(896,192),(960,192),(1024,192),
(320,256),(384,256),(448,256),(512,256),(576,256),(640,256),(704,256),(768,256),(832,256),(896,256),(960,256),(1024,256),
(384,320),(448,320),(512,320),(576,320),(640,320),(704,320),(768,320),(832,320),(896,320),(960,320),(1024,320),
(448,384),(512,384),(576,384),(640,384),(704,384),(768,384),(832,384),(896,384),(960,384),(1024,384),
(512,448),(576,448),(640,448),(704,448),(768,448),(832,448),(896,448),(960,448),(1024,448),
(576,512),(640,512),(704,512),(768,512),(832,512),(896,512),(960,512),(1024,512),
(640,576),(704,576),(768,576),(832,576),(896,576),(960,576),(1024,576),
(704,640),(768,640),(832,640),(896,640),(960,640),(1024,640),
(768,704),(832,704),(896,704),(960,704),(1024,704),
(832,768),(896,768),(960,768),(1024,768),
(896,832),(960,832),(1024,832),
(960,896),(1024,896),
(1024,960)
);
$crate::macro_repeated!(
impl_widen_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),
(1,17)
(64,128),
(64,192),(128,192),
(64,256),(128,256),(192,256),
(64,320),(128,320),(192,320),(256,320),
(64,384),(128,384),(192,384),(256,384),(320,384),
(64,448),(128,448),(192,448),(256,448),(320,448),(384,448),
(64,512),(128,512),(192,512),(256,512),(320,512),(384,512),(448,512),
(64,576),(128,576),(192,576),(256,576),(320,576),(384,576),(448,576),(512,576),
(64,640),(128,640),(192,640),(256,640),(320,640),(384,640),(448,640),(512,640),(576,640),
(64,704),(128,704),(192,704),(256,704),(320,704),(384,704),(448,704),(512,704),(576,704),(640,704),
(64,768),(128,768),(192,768),(256,768),(320,768),(384,768),(448,768),(512,768),(576,768),(640,768),(704,768),
(64,832),(128,832),(192,832),(256,832),(320,832),(384,832),(448,832),(512,832),(576,832),(640,832),(704,832),(768,832),
(64,896),(128,896),(192,896),(256,896),(320,896),(384,896),(448,896),(512,896),(576,896),(640,896),(704,896),(768,896),(832,896),
(64,960),(128,960),(192,960),(256,960),(320,960),(384,960),(448,960),(512,960),(576,960),(640,960),(704,960),(768,960),(832,960),(896,960),
(64,1024),(128,1024),(192,1024),(256,1024),(320,1024),(384,1024),(448,1024),(512,1024),(576,1024),(640,1024),(704,1024),(768,1024),(832,1024),(896,1024),(960,1024),
(64,1088)
);
impl<const N:usize,T:fixed_wide::fixed::Wrap<U>,U> fixed_wide::fixed::Wrap<Vector<N,U>> for Vector<N,T>
{
@@ -98,17 +98,17 @@ macro_rules! impl_narrow_not_const_generic{
($lhs:expr,$rhs:expr)
)=>{
paste::item!{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<{$lhs>>3},{$lhs>>1}>>{
#[inline]
pub fn [<wrap_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
pub fn [<wrap_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<{$rhs>>3},{$rhs>>1}>>{
self.map(|t|t.[<wrap_ $rhs>]())
}
#[inline]
pub fn [<narrow_ $rhs>](self)->Vector<N,Result<fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>,fixed_wide::fixed::NarrowError>>{
pub fn [<narrow_ $rhs>](self)->Vector<N,Result<fixed_wide::fixed::Fixed<{$rhs>>3},{$rhs>>1}>,fixed_wide::fixed::NarrowError>>{
self.map(|t|t.[<narrow_ $rhs>]())
}
#[inline]
pub fn [<clamp_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
pub fn [<clamp_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<{$rhs>>3},{$rhs>>1}>>{
self.map(|t|t.[<clamp_ $rhs>]())
}
}
@@ -123,9 +123,9 @@ macro_rules! impl_widen_not_const_generic{
($lhs:expr,$rhs:expr)
)=>{
paste::item!{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<{$lhs>>3},{$lhs>>1}>>{
#[inline]
pub fn [<widen_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
pub fn [<widen_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<{$rhs>>3},{$rhs>>1}>>{
self.map(|t|t.[<widen_ $rhs>]())
}
}

View File

@@ -1,9 +1,9 @@
use crate::types::{Matrix3,Matrix3x2,Matrix3x4,Matrix4x2,Vector3};
type Planar64=fixed_wide::types::I32F32;
type Planar64Wide1=fixed_wide::types::I64F64;
type Planar64=fixed_wide::types::F64_32;
type Planar64Wide1=fixed_wide::types::F128_64;
//type Planar64Wide2=fixed_wide::types::I128F128;
type Planar64Wide3=fixed_wide::types::I256F256;
type Planar64Wide3=fixed_wide::types::F512_256;
#[test]
fn wide_vec3(){
@@ -72,7 +72,7 @@ fn wide_matrix_det(){
]);
// In[2]:= Det[{{1, 2, 3}, {4, 5, 7}, {6, 8, 9}}]
// Out[2]= 7
assert_eq!(m.det(),fixed_wide::fixed::Fixed::<3,96>::from(7));
assert_eq!(m.det(),fixed_wide::types::F192_96::from(7));
}
#[test]

View File

@@ -519,7 +519,7 @@ pub fn unit_cylinder(face_descriptions:CubeFaceDescription)->Mesh{
(glam::vec2(-x as f32,y as f32).normalize()+1.0)/2.0
)
);
let pos=mb.acquire_pos_id($end+vec3::int(0,-x,y).with_length(Planar64::ONE).divide().wrap_1());
let pos=mb.acquire_pos_id($end+vec3::int(0,-x,y).with_length(Planar64::ONE).divide().wrap_64());
mb.acquire_vertex_id(IndexedVertex{pos,tex,normal,color})
}).collect();
@@ -560,9 +560,9 @@ pub fn unit_cylinder(face_descriptions:CubeFaceDescription)->Mesh{
let mut polygon_list=Vec::with_capacity(CubeFaceDescription::FACES);
for $loop in -GON..GON{
// lo Z
let lz_dir=$lo_dir.with_length(Planar64::ONE).divide().wrap_1();
let lz_dir=$lo_dir.with_length(Planar64::ONE).divide().wrap_64();
// hi Z
let hz_dir=$hi_dir.with_length(Planar64::ONE).divide().wrap_1();
let hz_dir=$hi_dir.with_length(Planar64::ONE).divide().wrap_64();
// pos
let lx_lz_pos=mb.acquire_pos_id(vec3::NEG_X+lz_dir);

View File

@@ -31,11 +31,11 @@ fn planar64_affine3_from_roblox(cf:&rbx_dom_weak::types::CFrame,size:&rbx_dom_we
Ok(Planar64Affine3::new(
Planar64Mat3::from_cols([
(vec3::try_from_f32_array([cf.orientation.x.x,cf.orientation.y.x,cf.orientation.z.x])?
*integer::try_from_f32(size.x/2.0)?).narrow_1().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
*integer::try_from_f32(size.x/2.0)?).narrow_64().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
(vec3::try_from_f32_array([cf.orientation.x.y,cf.orientation.y.y,cf.orientation.z.y])?
*integer::try_from_f32(size.y/2.0)?).narrow_1().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
*integer::try_from_f32(size.y/2.0)?).narrow_64().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
(vec3::try_from_f32_array([cf.orientation.x.z,cf.orientation.y.z,cf.orientation.z.z])?
*integer::try_from_f32(size.z/2.0)?).narrow_1().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
*integer::try_from_f32(size.z/2.0)?).narrow_64().unwrap(),//.map_err(Planar64ConvertError::Narrow)?
]),
vec3::try_from_f32_array([cf.position.x,cf.position.y,cf.position.z])?
))
@@ -909,13 +909,13 @@ impl PartialMap1<'_>{
model.mesh=mesh;
// avoid devide by zero but introduce more edge cases. not sure what the correct thing to do here is.
if mesh_size.x!=integer::Fixed::ZERO{
model.transform.matrix3.x_axis=(model.transform.matrix3.x_axis*2/mesh_size.x).divide().narrow_1().unwrap();
model.transform.matrix3.x_axis=(model.transform.matrix3.x_axis*2/mesh_size.x).divide().narrow_64().unwrap();
}
if mesh_size.y!=integer::Fixed::ZERO{
model.transform.matrix3.y_axis=(model.transform.matrix3.y_axis*2/mesh_size.y).divide().narrow_1().unwrap();
model.transform.matrix3.y_axis=(model.transform.matrix3.y_axis*2/mesh_size.y).divide().narrow_64().unwrap();
}
if mesh_size.z!=integer::Fixed::ZERO{
model.transform.matrix3.z_axis=(model.transform.matrix3.z_axis*2/mesh_size.z).divide().narrow_1().unwrap();
model.transform.matrix3.z_axis=(model.transform.matrix3.z_axis*2/mesh_size.z).divide().narrow_64().unwrap();
}
Some(model)
}).chain(self.deferred_unions_deferred_attributes.into_iter().flat_map(|deferred_union_deferred_attributes|{
@@ -932,13 +932,13 @@ impl PartialMap1<'_>{
model.mesh=mesh;
// avoid devide by zero but introduce more edge cases. not sure what the correct thing to do here is.
if size.x!=integer::Fixed::ZERO{
model.transform.matrix3.x_axis=(model.transform.matrix3.x_axis*2/size.x).divide().narrow_1().unwrap();
model.transform.matrix3.x_axis=(model.transform.matrix3.x_axis*2/size.x).divide().narrow_64().unwrap();
}
if size.y!=integer::Fixed::ZERO{
model.transform.matrix3.y_axis=(model.transform.matrix3.y_axis*2/size.y).divide().narrow_1().unwrap();
model.transform.matrix3.y_axis=(model.transform.matrix3.y_axis*2/size.y).divide().narrow_64().unwrap();
}
if size.z!=integer::Fixed::ZERO{
model.transform.matrix3.z_axis=(model.transform.matrix3.z_axis*2/size.z).divide().narrow_1().unwrap();
model.transform.matrix3.z_axis=(model.transform.matrix3.z_axis*2/size.z).divide().narrow_64().unwrap();
}
Some(model)
}))

View File

@@ -386,7 +386,7 @@ pub fn write_map<W:BinWriterExt>(mut writer:W,map:strafesnet_common::map::Comple
let mesh=map.meshes.get(model.mesh.get() as usize).ok_or(Error::InvalidMeshId(model.mesh))?;
let mut aabb=Aabb::default();
for &pos in &mesh.unique_pos{
aabb.grow(model.transform.transform_point3(pos).narrow_1().unwrap());
aabb.grow(model.transform.transform_point3(pos).narrow_64().unwrap());
}
Ok(((model::ModelId::new(model_id as u32),model.into()),aabb))
}).collect::<Result<Vec<_>,_>>()?;