Struct f64x2
#[repr(C, align(16))]
pub struct f64x2 { /* private fields */ }
Available on crate feature dep_wide only.

Implementations
impl f64x2
pub const ONE: f64x2
pub const ZERO: f64x2
pub const HALF: f64x2
pub const E: f64x2
pub const FRAC_1_PI: f64x2
pub const FRAC_2_PI: f64x2
pub const FRAC_2_SQRT_PI: f64x2
pub const FRAC_1_SQRT_2: f64x2
pub const FRAC_PI_2: f64x2
pub const FRAC_PI_3: f64x2
pub const FRAC_PI_4: f64x2
pub const FRAC_PI_6: f64x2
pub const FRAC_PI_8: f64x2
pub const LN_2: f64x2
pub const LN_10: f64x2
pub const LOG2_E: f64x2
pub const LOG10_E: f64x2
pub const LOG10_2: f64x2
pub const LOG2_10: f64x2
pub const PI: f64x2
pub const SQRT_2: f64x2
pub const TAU: f64x2
impl f64x2
pub const fn new(array: [f64; 2]) -> f64x2
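A minimal construction round-trip, shown with the upstream wide crate paths (in this crate the type is re-exported behind the dep_wide feature, so the exact import path is an assumption):

use wide::f64x2; // assumed import path

let v = f64x2::new([1.0, 2.0]);
assert_eq!(v.to_array(), [1.0, 2.0]);

// The associated constants broadcast their scalar into both lanes.
assert_eq!(f64x2::PI.to_array(), [core::f64::consts::PI; 2]);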
pub fn blend(self, t: f64x2, f: f64x2) -> f64x2
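blend carries no doc text here; in the wide crate, self acts as a per-lane mask (as produced by is_nan and the other classification methods), selecting each lane from t where the mask is set and from f otherwise. A sketch under that assumption, replacing NaN lanes with zero:

use wide::f64x2;

let v = f64x2::new([f64::NAN, 3.0]);
// is_nan yields the mask; blend selects ZERO for NaN lanes, v otherwise.
let cleaned = v.is_nan().blend(f64x2::ZERO, v);
assert_eq!(cleaned.to_array(), [0.0, 3.0]);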
pub fn abs(self) -> f64x2
pub fn floor(self) -> f64x2
pub fn ceil(self) -> f64x2
pub fn fast_max(self, rhs: f64x2) -> f64x2

Calculates the lanewise maximum of both vectors. This is a faster implementation than max, but it doesn’t specify any behavior if NaNs are involved.
pub fn max(self, rhs: f64x2) -> f64x2

Calculates the lanewise maximum of both vectors. If either lane is NaN, the other lane gets chosen. Use fast_max for a faster implementation that doesn’t handle NaNs.
pub fn fast_min(self, rhs: f64x2) -> f64x2

Calculates the lanewise minimum of both vectors. This is a faster implementation than min, but it doesn’t specify any behavior if NaNs are involved.
pub fn min(self, rhs: f64x2) -> f64x2

Calculates the lanewise minimum of both vectors. If either lane is NaN, the other lane gets chosen. Use fast_min for a faster implementation that doesn’t handle NaNs.
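The contrast in NaN handling is easiest to see side by side; only the NaN-aware variants give a portable answer on a NaN lane:

use wide::f64x2;

let a = f64x2::new([f64::NAN, 2.0]);
let b = f64x2::new([1.0, 5.0]);

// max is NaN-aware: the non-NaN lane wins.
assert_eq!(a.max(b).to_array(), [1.0, 5.0]);

// fast_max promises nothing for the NaN lane,
// so only the NaN-free lane can be asserted on.
assert_eq!(a.fast_max(b).to_array()[1], 5.0);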
pub fn is_nan(self) -> f64x2
pub fn is_finite(self) -> f64x2
pub fn is_inf(self) -> f64x2
pub fn round(self) -> f64x2
pub fn round_int(self) -> i64x2
pub fn mul_add(self, m: f64x2, a: f64x2) -> f64x2
pub fn mul_sub(self, m: f64x2, a: f64x2) -> f64x2
pub fn mul_neg_add(self, m: f64x2, a: f64x2) -> f64x2
pub fn mul_neg_sub(self, m: f64x2, a: f64x2) -> f64x2
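These four follow the usual fused-multiply shapes (self * m plus or minus a, with the mul_neg_* variants negating the product); whether a hardware FMA instruction is actually emitted depends on the target features. A sketch using mul_add for Horner evaluation:

use wide::f64x2;

// Evaluate 2*x*x + 3*x + 4 in both lanes via Horner's rule.
let x = f64x2::new([1.0, 2.0]);
let y = f64x2::new([2.0; 2])
    .mul_add(x, f64x2::new([3.0; 2]))  // 2x + 3
    .mul_add(x, f64x2::new([4.0; 2])); // (2x + 3) * x + 4
assert_eq!(y.to_array(), [9.0, 18.0]);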
pub fn flip_signs(self, signs: f64x2) -> f64x2
pub fn copysign(self, sign: f64x2) -> f64x2
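Neither method is documented in this listing, so the following semantics are an assumption based on the wide crate: copysign keeps the magnitude of self and takes the sign of sign, while flip_signs negates each lane of self wherever the matching lane of signs is negative:

use wide::f64x2;

let v = f64x2::new([1.5, -2.5]);
let s = f64x2::new([-1.0, 1.0]);
assert_eq!(v.copysign(s).to_array(), [-1.5, 2.5]);    // magnitude of v, sign of s
assert_eq!(v.flip_signs(s).to_array(), [-1.5, -2.5]); // flip lane 0 only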
pub fn asin_acos(self) -> (f64x2, f64x2)
pub fn acos(self) -> f64x2
pub fn asin(self) -> f64x2
pub fn atan(self) -> f64x2
pub fn atan2(self, x: f64x2) -> f64x2
pub fn sin_cos(self) -> (f64x2, f64x2)
pub fn sin(self) -> f64x2
pub fn cos(self) -> f64x2
pub fn tan(self) -> f64x2
pub fn to_degrees(self) -> f64x2
pub fn to_radians(self) -> f64x2
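sin_cos computes both results in one call, which is the cheaper option when, say, a rotation needs both. A sketch; the tolerance is an assumption, since the results come from a polynomial approximation:

use wide::f64x2;

let angles = f64x2::new([0.0, core::f64::consts::FRAC_PI_2]);
let (sin, cos) = angles.sin_cos();
assert!((sin.to_array()[1] - 1.0).abs() < 1e-12); // sin(pi/2) ~= 1
assert!((cos.to_array()[0] - 1.0).abs() < 1e-12); // cos(0) ~= 1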
pub fn sqrt(self) -> f64x2
pub fn move_mask(self) -> i32
pub fn any(self) -> bool
pub fn all(self) -> bool
pub fn none(self) -> bool
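move_mask packs the sign bit of each lane into the low bits of an i32 (bit 0 for lane 0), and any/all/none are the matching reductions over the mask. A sketch under that bit-ordering assumption:

use wide::f64x2;

let mask = f64x2::new([f64::NAN, 1.0]).is_nan();
assert_eq!(mask.move_mask(), 0b01); // only lane 0 is NaN
assert!(mask.any());
assert!(!mask.all());
assert!(!mask.none());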
pub fn reduce_add(self) -> f64

Horizontal add of all the elements of the vector.
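A common use is finishing off a SIMD loop, e.g. a two-lane dot product. The lanewise * below assumes the wide crate’s Mul implementation, which this truncated listing does not show (only the *Assign variants survive below):

use wide::f64x2;

let a = f64x2::new([1.0, 2.0]);
let b = f64x2::new([3.0, 4.0]);
// Lanewise multiply, then horizontal add.
let dot = (a * b).reduce_add();
assert_eq!(dot, 11.0);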
pub fn ln(self) -> f64x2
pub fn log2(self) -> f64x2
pub fn log10(self) -> f64x2
pub fn pow_f64x2(self, y: f64x2) -> f64x2
pub fn powf(self, y: f64) -> f64x2
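powf broadcasts a scalar exponent while pow_f64x2 takes per-lane exponents; the logarithms are lanewise. A sketch with tolerances, since these are polynomial approximations:

use wide::f64x2;

let l = f64x2::new([8.0, 16.0]).log2().to_array();
assert!((l[0] - 3.0).abs() < 1e-12 && (l[1] - 4.0).abs() < 1e-12);

let cubes = f64x2::new([2.0, 3.0]).powf(3.0).to_array();
assert!((cubes[0] - 8.0).abs() < 1e-9 && (cubes[1] - 27.0).abs() < 1e-9);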
pub fn to_array(self) -> [f64; 2]
pub fn as_array_ref(&self) -> &[f64; 2]
pub fn as_array_mut(&mut self) -> &mut [f64; 2]
pub fn from_i32x4_lower2(v: i32x4) -> f64x2

Converts the lower two i32 lanes to two f64 lanes, dropping the higher two i32 lanes.
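A sketch of the widening conversion; i32x4 is assumed to come from the same crate:

use wide::{f64x2, i32x4};

let ints = i32x4::new([1, 2, 3, 4]);
// Lanes 0 and 1 widen to f64; lanes 2 and 3 are dropped.
assert_eq!(f64x2::from_i32x4_lower2(ints).to_array(), [1.0, 2.0]);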
Trait Implementations
impl AddAssign<&f64x2> for f64x2
fn add_assign(&mut self, rhs: &f64x2)
Performs the += operation.

impl BitAndAssign<&f64x2> for f64x2
fn bitand_assign(&mut self, rhs: &f64x2)
Performs the &= operation.

impl BitAndAssign for f64x2
fn bitand_assign(&mut self, rhs: f64x2)
Performs the &= operation.

impl BitOrAssign<&f64x2> for f64x2
fn bitor_assign(&mut self, rhs: &f64x2)
Performs the |= operation.

impl BitOrAssign for f64x2
fn bitor_assign(&mut self, rhs: f64x2)
Performs the |= operation.

impl BitXorAssign<&f64x2> for f64x2
fn bitxor_assign(&mut self, rhs: &f64x2)
Performs the ^= operation.

impl BitXorAssign for f64x2
fn bitxor_assign(&mut self, rhs: f64x2)
Performs the ^= operation.

impl<'de> Deserialize<'de> for f64x2
fn deserialize<D>(deserializer: D) -> Result<f64x2, <D as Deserializer<'de>>::Error>
where
    D: Deserializer<'de>,
impl DivAssign<&f64x2> for f64x2
fn div_assign(&mut self, rhs: &f64x2)
Performs the /= operation.

impl MulAssign<&f64x2> for f64x2
fn mul_assign(&mut self, rhs: &f64x2)
Performs the *= operation.

impl Serialize for f64x2
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
    S: Serializer,
impl SubAssign<&f64x2> for f64x2
fn sub_assign(&mut self, rhs: &f64x2)
Performs the -= operation.

impl Copy for f64x2

impl Pod for f64x2

impl StructuralPartialEq for f64x2
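Since the type is both Serialize and Deserialize, it round-trips through any serde data format. A sketch using serde_json (the choice of format and the dependency are assumptions; the on-wire representation is deliberately left unasserted):

use wide::f64x2;

fn main() -> Result<(), serde_json::Error> {
    let v = f64x2::new([1.0, 2.0]);
    let json = serde_json::to_string(&v)?;
    let back: f64x2 = serde_json::from_str(&json)?;
    assert_eq!(back.to_array(), v.to_array());
    Ok(())
}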
Auto Trait Implementations
impl Freeze for f64x2
impl RefUnwindSafe for f64x2
impl Send for f64x2
impl Sync for f64x2
impl Unpin for f64x2
impl UnwindSafe for f64x2
Blanket Implementations
impl<T> ArchivePointee for T
type ArchivedMetadata = ()
fn pointer_metadata(_: &<T as ArchivePointee>::ArchivedMetadata) -> <T as Pointee>::Metadata
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> ByteSized for T
const BYTE_ALIGN: usize = _
fn byte_align(&self) -> usize
fn ptr_size_ratio(&self) -> [usize; 2]
impl<T, R> Chain<R> for T where T: ?Sized
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.

impl<T> CloneToUninit for T where T: Clone
impl<T> ExtAny for T
fn as_any_mut(&mut self) -> &mut dyn Any where Self: Sized
impl<T> ExtMem for T where T: ?Sized
const NEEDS_DROP: bool = _
fn mem_align_of_val(&self) -> usize
fn mem_size_of_val(&self) -> usize
fn mem_needs_drop(&self) -> bool
Returns true if dropping values of this type matters.
fn mem_forget(self) where Self: Sized
Forgets about self without running its destructor.
fn mem_replace(&mut self, other: Self) -> Self where Self: Sized
unsafe fn mem_zeroed<T>() -> T
Available on crate feature unsafe_layout only. Returns a T represented by the all-zero byte-pattern.
unsafe fn mem_transmute_copy<Src, Dst>(src: &Src) -> Dst
Available on crate feature unsafe_layout only.
fn mem_as_bytes(&self) -> &[u8]
Available on crate feature unsafe_slice only.

impl<S> FromSample<S> for S
fn from_sample_(s: S) -> S

impl<T> Hook for T

impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.

impl<F, T> IntoSample<T> for F where T: FromSample<F>
fn into_sample(self) -> T
impl<T> LayoutRaw for T
fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>
impl<T, N1, N2> Niching<NichedOption<T, N1>> for N2
unsafe fn is_niched(niched: *const NichedOption<T, N1>) -> bool
fn resolve_niched(out: Place<NichedOption<T, N1>>)
Writes data to out indicating that a T is niched.