diff --git a/.travis.yml b/.travis.yml index 117f56f173..70120ce016 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,8 @@ matrix: - env: DOCUMENTATION install: true script: ci/dox.sh + - script: cargo test --manifest-path stdsimd-verify/Cargo.toml + install: true - env: RUSTFMT=On TARGET=x86_64-unknown-linux-gnu NO_ADD=1 script: | cargo install rustfmt-nightly --force @@ -40,6 +42,8 @@ install: script: - cargo generate-lockfile + # FIXME (travis-ci/travis-ci#8920) shouldn't be necessary... + - python -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" - ci/run-docker.sh $TARGET $FEATURES notifications: diff --git a/Cargo.toml b/Cargo.toml index b64b2f8e67..93ce1939e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ categories = ["hardware-support"] license = "MIT/Apache-2.0" [workspace] +members = ["stdsimd-verify"] [badges] travis-ci = { repository = "BurntSushi/stdsimd" } diff --git a/ci/run.sh b/ci/run.sh index 51734e5fce..337b9e8176 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -22,7 +22,9 @@ echo "FEATURES=${FEATURES}" echo "OBJDUMP=${OBJDUMP}" cargo_test() { - cmd="cargo test --all --target=$TARGET --features $FEATURES --verbose $1 -- --nocapture $2" + cmd="cargo test --target=$TARGET --features $FEATURES $1" + cmd="$cmd -p coresimd -p stdsimd" + cmd="$cmd -- $2" $cmd } diff --git a/coresimd/src/x86/i586/abm.rs b/coresimd/src/x86/i586/abm.rs index 52dc991a84..2ca2cc1cc3 100644 --- a/coresimd/src/x86/i586/abm.rs +++ b/coresimd/src/x86/i586/abm.rs @@ -44,16 +44,16 @@ pub unsafe fn _lzcnt_u64(x: u64) -> u64 { #[inline(always)] #[target_feature = "+popcnt"] #[cfg_attr(test, assert_instr(popcnt))] -pub unsafe fn _popcnt32(x: u32) -> u32 { - x.count_ones() +pub unsafe fn _popcnt32(x: i32) -> i32 { + x.count_ones() as i32 } /// Counts the bits that are set. 
#[inline(always)] #[target_feature = "+popcnt"] #[cfg_attr(test, assert_instr(popcnt))] -pub unsafe fn _popcnt64(x: u64) -> u64 { - x.count_ones() as u64 +pub unsafe fn _popcnt64(x: i64) -> i32 { + x.count_ones() as i32 } #[cfg(test)] @@ -64,21 +64,21 @@ mod tests { #[simd_test = "lzcnt"] unsafe fn _lzcnt_u32() { - assert_eq!(abm::_lzcnt_u32(0b0101_1010u32), 25u32); + assert_eq!(abm::_lzcnt_u32(0b0101_1010), 25); } #[simd_test = "lzcnt"] unsafe fn _lzcnt_u64() { - assert_eq!(abm::_lzcnt_u64(0b0101_1010u64), 57u64); + assert_eq!(abm::_lzcnt_u64(0b0101_1010), 57); } #[simd_test = "popcnt"] unsafe fn _popcnt32() { - assert_eq!(abm::_popcnt32(0b0101_1010u32), 4); + assert_eq!(abm::_popcnt32(0b0101_1010), 4); } #[simd_test = "popcnt"] unsafe fn _popcnt64() { - assert_eq!(abm::_popcnt64(0b0101_1010u64), 4); + assert_eq!(abm::_popcnt64(0b0101_1010), 4); } } diff --git a/coresimd/src/x86/i586/avx.rs b/coresimd/src/x86/i586/avx.rs index 6ea508bb88..efa842d9e2 100644 --- a/coresimd/src/x86/i586/avx.rs +++ b/coresimd/src/x86/i586/avx.rs @@ -607,69 +607,69 @@ pub unsafe fn _mm256_xor_ps(a: f32x8, b: f32x8) -> f32x8 { } /// Equal (ordered, non-signaling) -pub const _CMP_EQ_OQ: u8 = 0x00; +pub const _CMP_EQ_OQ: i32 = 0x00; /// Less-than (ordered, signaling) -pub const _CMP_LT_OS: u8 = 0x01; +pub const _CMP_LT_OS: i32 = 0x01; /// Less-than-or-equal (ordered, signaling) -pub const _CMP_LE_OS: u8 = 0x02; +pub const _CMP_LE_OS: i32 = 0x02; /// Unordered (non-signaling) -pub const _CMP_UNORD_Q: u8 = 0x03; +pub const _CMP_UNORD_Q: i32 = 0x03; /// Not-equal (unordered, non-signaling) -pub const _CMP_NEQ_UQ: u8 = 0x04; +pub const _CMP_NEQ_UQ: i32 = 0x04; /// Not-less-than (unordered, signaling) -pub const _CMP_NLT_US: u8 = 0x05; +pub const _CMP_NLT_US: i32 = 0x05; /// Not-less-than-or-equal (unordered, signaling) -pub const _CMP_NLE_US: u8 = 0x06; +pub const _CMP_NLE_US: i32 = 0x06; /// Ordered (non-signaling) -pub const _CMP_ORD_Q: u8 = 0x07; +pub const _CMP_ORD_Q: i32 = 0x07; /// 
Equal (unordered, non-signaling) -pub const _CMP_EQ_UQ: u8 = 0x08; +pub const _CMP_EQ_UQ: i32 = 0x08; /// Not-greater-than-or-equal (unordered, signaling) -pub const _CMP_NGE_US: u8 = 0x09; +pub const _CMP_NGE_US: i32 = 0x09; /// Not-greater-than (unordered, signaling) -pub const _CMP_NGT_US: u8 = 0x0a; +pub const _CMP_NGT_US: i32 = 0x0a; /// False (ordered, non-signaling) -pub const _CMP_FALSE_OQ: u8 = 0x0b; +pub const _CMP_FALSE_OQ: i32 = 0x0b; /// Not-equal (ordered, non-signaling) -pub const _CMP_NEQ_OQ: u8 = 0x0c; +pub const _CMP_NEQ_OQ: i32 = 0x0c; /// Greater-than-or-equal (ordered, signaling) -pub const _CMP_GE_OS: u8 = 0x0d; +pub const _CMP_GE_OS: i32 = 0x0d; /// Greater-than (ordered, signaling) -pub const _CMP_GT_OS: u8 = 0x0e; +pub const _CMP_GT_OS: i32 = 0x0e; /// True (unordered, non-signaling) -pub const _CMP_TRUE_UQ: u8 = 0x0f; +pub const _CMP_TRUE_UQ: i32 = 0x0f; /// Equal (ordered, signaling) -pub const _CMP_EQ_OS: u8 = 0x10; +pub const _CMP_EQ_OS: i32 = 0x10; /// Less-than (ordered, non-signaling) -pub const _CMP_LT_OQ: u8 = 0x11; +pub const _CMP_LT_OQ: i32 = 0x11; /// Less-than-or-equal (ordered, non-signaling) -pub const _CMP_LE_OQ: u8 = 0x12; +pub const _CMP_LE_OQ: i32 = 0x12; /// Unordered (signaling) -pub const _CMP_UNORD_S: u8 = 0x13; +pub const _CMP_UNORD_S: i32 = 0x13; /// Not-equal (unordered, signaling) -pub const _CMP_NEQ_US: u8 = 0x14; +pub const _CMP_NEQ_US: i32 = 0x14; /// Not-less-than (unordered, non-signaling) -pub const _CMP_NLT_UQ: u8 = 0x15; +pub const _CMP_NLT_UQ: i32 = 0x15; /// Not-less-than-or-equal (unordered, non-signaling) -pub const _CMP_NLE_UQ: u8 = 0x16; +pub const _CMP_NLE_UQ: i32 = 0x16; /// Ordered (signaling) -pub const _CMP_ORD_S: u8 = 0x17; +pub const _CMP_ORD_S: i32 = 0x17; /// Equal (unordered, signaling) -pub const _CMP_EQ_US: u8 = 0x18; +pub const _CMP_EQ_US: i32 = 0x18; /// Not-greater-than-or-equal (unordered, non-signaling) -pub const _CMP_NGE_UQ: u8 = 0x19; +pub const _CMP_NGE_UQ: i32 = 0x19; /// 
Not-greater-than (unordered, non-signaling) -pub const _CMP_NGT_UQ: u8 = 0x1a; +pub const _CMP_NGT_UQ: i32 = 0x1a; /// False (ordered, signaling) -pub const _CMP_FALSE_OS: u8 = 0x1b; +pub const _CMP_FALSE_OS: i32 = 0x1b; /// Not-equal (ordered, signaling) -pub const _CMP_NEQ_OS: u8 = 0x1c; +pub const _CMP_NEQ_OS: i32 = 0x1c; /// Greater-than-or-equal (ordered, non-signaling) -pub const _CMP_GE_OQ: u8 = 0x1d; +pub const _CMP_GE_OQ: i32 = 0x1d; /// Greater-than (ordered, non-signaling) -pub const _CMP_GT_OQ: u8 = 0x1e; +pub const _CMP_GT_OQ: i32 = 0x1e; /// True (unordered, signaling) -pub const _CMP_TRUE_US: u8 = 0x1f; +pub const _CMP_TRUE_US: i32 = 0x1f; /// Compare packed double-precision (64-bit) floating-point /// elements in `a` and `b` based on the comparison operand @@ -677,7 +677,7 @@ pub const _CMP_TRUE_US: u8 = 0x1f; #[inline(always)] #[target_feature = "+avx,+sse2"] #[cfg_attr(test, assert_instr(vcmpeqpd, imm8 = 0))] // TODO Validate vcmppd -pub unsafe fn _mm_cmp_pd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { +pub unsafe fn _mm_cmp_pd(a: f64x2, b: f64x2, imm8: i32) -> f64x2 { macro_rules! call { ($imm8:expr) => { vcmppd(a, b, $imm8) } } @@ -690,7 +690,7 @@ pub unsafe fn _mm_cmp_pd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { #[inline(always)] #[target_feature = "+avx"] #[cfg_attr(test, assert_instr(vcmpeqpd, imm8 = 0))] // TODO Validate vcmppd -pub unsafe fn _mm256_cmp_pd(a: f64x4, b: f64x4, imm8: u8) -> f64x4 { +pub unsafe fn _mm256_cmp_pd(a: f64x4, b: f64x4, imm8: i32) -> f64x4 { macro_rules! call { ($imm8:expr) => { vcmppd256(a, b, $imm8) } } @@ -703,7 +703,7 @@ pub unsafe fn _mm256_cmp_pd(a: f64x4, b: f64x4, imm8: u8) -> f64x4 { #[inline(always)] #[target_feature = "+avx,+sse"] #[cfg_attr(test, assert_instr(vcmpeqps, imm8 = 0))] // TODO Validate vcmpps -pub unsafe fn _mm_cmp_ps(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { +pub unsafe fn _mm_cmp_ps(a: f32x4, b: f32x4, imm8: i32) -> f32x4 { macro_rules! 
call { ($imm8:expr) => { vcmpps(a, b, $imm8) } } @@ -716,7 +716,7 @@ pub unsafe fn _mm_cmp_ps(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { #[inline(always)] #[target_feature = "+avx"] #[cfg_attr(test, assert_instr(vcmpeqps, imm8 = 0))] // TODO Validate vcmpps -pub unsafe fn _mm256_cmp_ps(a: f32x8, b: f32x8, imm8: u8) -> f32x8 { +pub unsafe fn _mm256_cmp_ps(a: f32x8, b: f32x8, imm8: i32) -> f32x8 { macro_rules! call { ($imm8:expr) => { vcmpps256(a, b, $imm8) } } @@ -731,7 +731,7 @@ pub unsafe fn _mm256_cmp_ps(a: f32x8, b: f32x8, imm8: u8) -> f32x8 { #[inline(always)] #[target_feature = "+avx,+sse2"] #[cfg_attr(test, assert_instr(vcmpeqsd, imm8 = 0))] // TODO Validate vcmpsd -pub unsafe fn _mm_cmp_sd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { +pub unsafe fn _mm_cmp_sd(a: f64x2, b: f64x2, imm8: i32) -> f64x2 { macro_rules! call { ($imm8:expr) => { vcmpsd(a, b, $imm8) } } @@ -746,7 +746,7 @@ pub unsafe fn _mm_cmp_sd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { #[inline(always)] #[target_feature = "+avx,+sse"] #[cfg_attr(test, assert_instr(vcmpeqss, imm8 = 0))] // TODO Validate vcmpss -pub unsafe fn _mm_cmp_ss(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { +pub unsafe fn _mm_cmp_ss(a: f32x4, b: f32x4, imm8: i32) -> f32x4 { macro_rules! call { ($imm8:expr) => { vcmpss(a, b, $imm8) } } @@ -862,48 +862,6 @@ pub unsafe fn _mm256_extractf128_si256(a: __m256i, imm8: i32) -> __m128i { __m128i::from(dst) } -/// Extract an 8-bit integer from `a`, selected with `imm8`. Returns a 32-bit -/// integer containing the zero-extended integer data. -/// -/// See [LLVM commit D20468][https://reviews.llvm.org/D20468]. -#[inline(always)] -#[target_feature = "+avx"] -// This intrinsic has no corresponding instruction. -pub unsafe fn _mm256_extract_epi8(a: i8x32, imm8: i32) -> i32 { - let imm8 = (imm8 & 31) as u32; - (a.extract_unchecked(imm8) as i32) & 0xFF -} - -/// Extract a 16-bit integer from `a`, selected with `imm8`. Returns a 32-bit -/// integer containing the zero-extended integer data. 
-/// -/// See [LLVM commit D20468][https://reviews.llvm.org/D20468]. -#[inline(always)] -#[target_feature = "+avx"] -// This intrinsic has no corresponding instruction. -pub unsafe fn _mm256_extract_epi16(a: i16x16, imm8: i32) -> i32 { - let imm8 = (imm8 & 15) as u32; - (a.extract_unchecked(imm8) as i32) & 0xFFFF -} - -/// Extract a 32-bit integer from `a`, selected with `imm8`. -#[inline(always)] -#[target_feature = "+avx"] -// This intrinsic has no corresponding instruction. -pub unsafe fn _mm256_extract_epi32(a: i32x8, imm8: i32) -> i32 { - let imm8 = (imm8 & 7) as u32; - a.extract_unchecked(imm8) -} - -/// Extract a 64-bit integer from `a`, selected with `imm8`. -#[inline(always)] -#[target_feature = "+avx"] -// This intrinsic has no corresponding instruction. -pub unsafe fn _mm256_extract_epi64(a: i64x4, imm8: i32) -> i64 { - let imm8 = (imm8 & 3) as u32; - a.extract_unchecked(imm8) -} - /// Zero the contents of all XMM or YMM registers. #[inline(always)] #[target_feature = "+avx"] @@ -1138,7 +1096,7 @@ pub unsafe fn _mm_permute_pd(a: f64x2, imm8: i32) -> f64x2 { #[inline(always)] #[target_feature = "+avx"] #[cfg_attr(test, assert_instr(vperm2f128, imm8 = 0x5))] -pub unsafe fn _mm256_permute2f128_ps(a: f32x8, b: f32x8, imm8: i8) -> f32x8 { +pub unsafe fn _mm256_permute2f128_ps(a: f32x8, b: f32x8, imm8: i32) -> f32x8 { macro_rules! call { ($imm8:expr) => { vperm2f128ps256(a, b, $imm8) } } @@ -1150,7 +1108,7 @@ pub unsafe fn _mm256_permute2f128_ps(a: f32x8, b: f32x8, imm8: i8) -> f32x8 { #[inline(always)] #[target_feature = "+avx"] #[cfg_attr(test, assert_instr(vperm2f128, imm8 = 0x31))] -pub unsafe fn _mm256_permute2f128_pd(a: f64x4, b: f64x4, imm8: i8) -> f64x4 { +pub unsafe fn _mm256_permute2f128_pd(a: f64x4, b: f64x4, imm8: i32) -> f64x4 { macro_rules! 
call { ($imm8:expr) => { vperm2f128pd256(a, b, $imm8) } } @@ -1163,7 +1121,7 @@ pub unsafe fn _mm256_permute2f128_pd(a: f64x4, b: f64x4, imm8: i8) -> f64x4 { #[target_feature = "+avx"] #[cfg_attr(test, assert_instr(vperm2f128, imm8 = 0x31))] pub unsafe fn _mm256_permute2f128_si256( - a: i32x8, b: i32x8, imm8: i8 + a: i32x8, b: i32x8, imm8: i32 ) -> i32x8 { macro_rules! call { ($imm8:expr) => { vperm2f128si256(a, b, $imm8) } @@ -3146,47 +3104,6 @@ mod tests { assert_eq!(r, __m128i::from(e)); } - #[simd_test = "avx"] - unsafe fn _mm256_extract_epi8() { - #[cfg_attr(rustfmt, rustfmt_skip)] - let a = i8x32::new( - -1, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31 - ); - let r1 = avx::_mm256_extract_epi8(a, 0); - let r2 = avx::_mm256_extract_epi8(a, 35); - assert_eq!(r1, 0xFF); - assert_eq!(r2, 3); - } - - #[simd_test = "avx"] - unsafe fn _mm256_extract_epi16() { - let a = - i16x16::new(-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let r1 = avx::_mm256_extract_epi16(a, 0); - let r2 = avx::_mm256_extract_epi16(a, 19); - assert_eq!(r1, 0xFFFF); - assert_eq!(r2, 3); - } - - #[simd_test = "avx"] - unsafe fn _mm256_extract_epi32() { - let a = i32x8::new(-1, 1, 2, 3, 4, 5, 6, 7); - let r1 = avx::_mm256_extract_epi32(a, 0); - let r2 = avx::_mm256_extract_epi32(a, 11); - assert_eq!(r1, -1); - assert_eq!(r2, 3); - } - - #[simd_test = "avx"] - unsafe fn _mm256_extract_epi64() { - let a = i64x4::new(0, 1, 2, 3); - let r = avx::_mm256_extract_epi64(a, 3); - assert_eq!(r, 3); - } - #[simd_test = "avx"] unsafe fn _mm256_zeroall() { avx::_mm256_zeroall(); diff --git a/coresimd/src/x86/i586/avx2.rs b/coresimd/src/x86/i586/avx2.rs index 52d61bf2fd..31d996750b 100644 --- a/coresimd/src/x86/i586/avx2.rs +++ b/coresimd/src/x86/i586/avx2.rs @@ -713,7 +713,7 @@ pub unsafe fn _mm256_hsubs_epi16(a: i16x16, b: i16x16) -> i16x16 { #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdd, scale = 
1))] pub unsafe fn _mm_i32gather_epi32( - slice: *const i32, offsets: i32x4, scale: i8 + slice: *const i32, offsets: i32x4, scale: i32 ) -> i32x4 { macro_rules! call { ($imm8:expr) => (pgatherdd(i32x4::splat(0), slice as *const i8, offsets, i32x4::splat(-1), $imm8)) @@ -729,7 +729,7 @@ pub unsafe fn _mm_i32gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdd, scale = 1))] pub unsafe fn _mm_mask_i32gather_epi32( - src: i32x4, slice: *const i32, offsets: i32x4, mask: i32x4, scale: i8 + src: i32x4, slice: *const i32, offsets: i32x4, mask: i32x4, scale: i32 ) -> i32x4 { macro_rules! call { ($imm8:expr) => (pgatherdd(src, slice as *const i8, offsets, mask, $imm8)) @@ -744,7 +744,7 @@ pub unsafe fn _mm_mask_i32gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdd, scale = 1))] pub unsafe fn _mm256_i32gather_epi32( - slice: *const i32, offsets: i32x8, scale: i8 + slice: *const i32, offsets: i32x8, scale: i32 ) -> i32x8 { macro_rules! call { ($imm8:expr) => (vpgatherdd(i32x8::splat(0), slice as *const i8, offsets, i32x8::splat(-1), $imm8)) @@ -760,7 +760,7 @@ pub unsafe fn _mm256_i32gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdd, scale = 1))] pub unsafe fn _mm256_mask_i32gather_epi32( - src: i32x8, slice: *const i32, offsets: i32x8, mask: i32x8, scale: i8 + src: i32x8, slice: *const i32, offsets: i32x8, mask: i32x8, scale: i32 ) -> i32x8 { macro_rules! call { ($imm8:expr) => (vpgatherdd(src, slice as *const i8, offsets, mask, $imm8)) @@ -775,7 +775,7 @@ pub unsafe fn _mm256_mask_i32gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdps, scale = 1))] pub unsafe fn _mm_i32gather_ps( - slice: *const f32, offsets: i32x4, scale: i8 + slice: *const f32, offsets: i32x4, scale: i32 ) -> f32x4 { macro_rules! 
call { ($imm8:expr) => (pgatherdps(f32x4::splat(0.0), slice as *const i8, offsets, f32x4::splat(-1.0), $imm8)) @@ -791,7 +791,7 @@ pub unsafe fn _mm_i32gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdps, scale = 1))] pub unsafe fn _mm_mask_i32gather_ps( - src: f32x4, slice: *const f32, offsets: i32x4, mask: f32x4, scale: i8 + src: f32x4, slice: *const f32, offsets: i32x4, mask: f32x4, scale: i32 ) -> f32x4 { macro_rules! call { ($imm8:expr) => (pgatherdps(src, slice as *const i8, offsets, mask, $imm8)) @@ -806,7 +806,7 @@ pub unsafe fn _mm_mask_i32gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdps, scale = 1))] pub unsafe fn _mm256_i32gather_ps( - slice: *const f32, offsets: i32x8, scale: i8 + slice: *const f32, offsets: i32x8, scale: i32 ) -> f32x8 { macro_rules! call { ($imm8:expr) => (vpgatherdps(f32x8::splat(0.0), slice as *const i8, offsets, f32x8::splat(-1.0), $imm8)) @@ -822,7 +822,7 @@ pub unsafe fn _mm256_i32gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdps, scale = 1))] pub unsafe fn _mm256_mask_i32gather_ps( - src: f32x8, slice: *const f32, offsets: i32x8, mask: f32x8, scale: i8 + src: f32x8, slice: *const f32, offsets: i32x8, mask: f32x8, scale: i32 ) -> f32x8 { macro_rules! call { ($imm8:expr) => (vpgatherdps(src, slice as *const i8, offsets, mask, $imm8)) @@ -837,7 +837,7 @@ pub unsafe fn _mm256_mask_i32gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdq, scale = 1))] pub unsafe fn _mm_i32gather_epi64( - slice: *const i64, offsets: i32x4, scale: i8 + slice: *const i64, offsets: i32x4, scale: i32 ) -> i64x2 { macro_rules! 
call { ($imm8:expr) => (pgatherdq(i64x2::splat(0), slice as *const i8, offsets, i64x2::splat(-1), $imm8)) @@ -853,7 +853,7 @@ pub unsafe fn _mm_i32gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdq, scale = 1))] pub unsafe fn _mm_mask_i32gather_epi64( - src: i64x2, slice: *const i64, offsets: i32x4, mask: i64x2, scale: i8 + src: i64x2, slice: *const i64, offsets: i32x4, mask: i64x2, scale: i32 ) -> i64x2 { macro_rules! call { ($imm8:expr) => (pgatherdq(src, slice as *const i8, offsets, mask, $imm8)) @@ -868,7 +868,7 @@ pub unsafe fn _mm_mask_i32gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdq, scale = 1))] pub unsafe fn _mm256_i32gather_epi64( - slice: *const i64, offsets: i32x4, scale: i8 + slice: *const i64, offsets: i32x4, scale: i32 ) -> i64x4 { macro_rules! call { ($imm8:expr) => (vpgatherdq(i64x4::splat(0), slice as *const i8, offsets, i64x4::splat(-1), $imm8)) @@ -884,7 +884,7 @@ pub unsafe fn _mm256_i32gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherdq, scale = 1))] pub unsafe fn _mm256_mask_i32gather_epi64( - src: i64x4, slice: *const i64, offsets: i32x4, mask: i64x4, scale: i8 + src: i64x4, slice: *const i64, offsets: i32x4, mask: i64x4, scale: i32 ) -> i64x4 { macro_rules! call { ($imm8:expr) => (vpgatherdq(src, slice as *const i8, offsets, mask, $imm8)) @@ -899,7 +899,7 @@ pub unsafe fn _mm256_mask_i32gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdpd, scale = 1))] pub unsafe fn _mm_i32gather_pd( - slice: *const f64, offsets: i32x4, scale: i8 + slice: *const f64, offsets: i32x4, scale: i32 ) -> f64x2 { macro_rules! 
call { ($imm8:expr) => (pgatherdpd(f64x2::splat(0.0), slice as *const i8, offsets, f64x2::splat(-1.0), $imm8)) @@ -915,7 +915,7 @@ pub unsafe fn _mm_i32gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdpd, scale = 1))] pub unsafe fn _mm_mask_i32gather_pd( - src: f64x2, slice: *const f64, offsets: i32x4, mask: f64x2, scale: i8 + src: f64x2, slice: *const f64, offsets: i32x4, mask: f64x2, scale: i32 ) -> f64x2 { macro_rules! call { ($imm8:expr) => (pgatherdpd(src, slice as *const i8, offsets, mask, $imm8)) @@ -930,7 +930,7 @@ pub unsafe fn _mm_mask_i32gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdpd, scale = 1))] pub unsafe fn _mm256_i32gather_pd( - slice: *const f64, offsets: i32x4, scale: i8 + slice: *const f64, offsets: i32x4, scale: i32 ) -> f64x4 { macro_rules! call { ($imm8:expr) => (vpgatherdpd(f64x4::splat(0.0), slice as *const i8, offsets, f64x4::splat(-1.0), $imm8)) @@ -946,7 +946,7 @@ pub unsafe fn _mm256_i32gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherdpd, scale = 1))] pub unsafe fn _mm256_mask_i32gather_pd( - src: f64x4, slice: *const f64, offsets: i32x4, mask: f64x4, scale: i8 + src: f64x4, slice: *const f64, offsets: i32x4, mask: f64x4, scale: i32 ) -> f64x4 { macro_rules! call { ($imm8:expr) => (vpgatherdpd(src, slice as *const i8, offsets, mask, $imm8)) @@ -961,7 +961,7 @@ pub unsafe fn _mm256_mask_i32gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqd, scale = 1))] pub unsafe fn _mm_i64gather_epi32( - slice: *const i32, offsets: i64x2, scale: i8 + slice: *const i32, offsets: i64x2, scale: i32 ) -> i32x4 { macro_rules! 
call { ($imm8:expr) => (pgatherqd(i32x4::splat(0), slice as *const i8, offsets, i32x4::splat(-1), $imm8)) @@ -977,7 +977,7 @@ pub unsafe fn _mm_i64gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqd, scale = 1))] pub unsafe fn _mm_mask_i64gather_epi32( - src: i32x4, slice: *const i32, offsets: i64x2, mask: i32x4, scale: i8 + src: i32x4, slice: *const i32, offsets: i64x2, mask: i32x4, scale: i32 ) -> i32x4 { macro_rules! call { ($imm8:expr) => (pgatherqd(src, slice as *const i8, offsets, mask, $imm8)) @@ -992,7 +992,7 @@ pub unsafe fn _mm_mask_i64gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqd, scale = 1))] pub unsafe fn _mm256_i64gather_epi32( - slice: *const i32, offsets: i64x4, scale: i8 + slice: *const i32, offsets: i64x4, scale: i32 ) -> i32x4 { macro_rules! call { ($imm8:expr) => (vpgatherqd(i32x4::splat(0), slice as *const i8, offsets, i32x4::splat(-1), $imm8)) @@ -1008,7 +1008,7 @@ pub unsafe fn _mm256_i64gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqd, scale = 1))] pub unsafe fn _mm256_mask_i64gather_epi32( - src: i32x4, slice: *const i32, offsets: i64x4, mask: i32x4, scale: i8 + src: i32x4, slice: *const i32, offsets: i64x4, mask: i32x4, scale: i32 ) -> i32x4 { macro_rules! call { ($imm8:expr) => (vpgatherqd(src, slice as *const i8, offsets, mask, $imm8)) @@ -1023,7 +1023,7 @@ pub unsafe fn _mm256_mask_i64gather_epi32( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqps, scale = 1))] pub unsafe fn _mm_i64gather_ps( - slice: *const f32, offsets: i64x2, scale: i8 + slice: *const f32, offsets: i64x2, scale: i32 ) -> f32x4 { macro_rules! 
call { ($imm8:expr) => (pgatherqps(f32x4::splat(0.0), slice as *const i8, offsets, f32x4::splat(-1.0), $imm8)) @@ -1039,7 +1039,7 @@ pub unsafe fn _mm_i64gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqps, scale = 1))] pub unsafe fn _mm_mask_i64gather_ps( - src: f32x4, slice: *const f32, offsets: i64x2, mask: f32x4, scale: i8 + src: f32x4, slice: *const f32, offsets: i64x2, mask: f32x4, scale: i32 ) -> f32x4 { macro_rules! call { ($imm8:expr) => (pgatherqps(src, slice as *const i8, offsets, mask, $imm8)) @@ -1054,7 +1054,7 @@ pub unsafe fn _mm_mask_i64gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqps, scale = 1))] pub unsafe fn _mm256_i64gather_ps( - slice: *const f32, offsets: i64x4, scale: i8 + slice: *const f32, offsets: i64x4, scale: i32 ) -> f32x4 { macro_rules! call { ($imm8:expr) => (vpgatherqps(f32x4::splat(0.0), slice as *const i8, offsets, f32x4::splat(-1.0), $imm8)) @@ -1070,7 +1070,7 @@ pub unsafe fn _mm256_i64gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqps, scale = 1))] pub unsafe fn _mm256_mask_i64gather_ps( - src: f32x4, slice: *const f32, offsets: i64x4, mask: f32x4, scale: i8 + src: f32x4, slice: *const f32, offsets: i64x4, mask: f32x4, scale: i32 ) -> f32x4 { macro_rules! call { ($imm8:expr) => (vpgatherqps(src, slice as *const i8, offsets, mask, $imm8)) @@ -1085,7 +1085,7 @@ pub unsafe fn _mm256_mask_i64gather_ps( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqq, scale = 1))] pub unsafe fn _mm_i64gather_epi64( - slice: *const i64, offsets: i64x2, scale: i8 + slice: *const i64, offsets: i64x2, scale: i32 ) -> i64x2 { macro_rules! 
call { ($imm8:expr) => (pgatherqq(i64x2::splat(0), slice as *const i8, offsets, i64x2::splat(-1), $imm8)) @@ -1101,7 +1101,7 @@ pub unsafe fn _mm_i64gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqq, scale = 1))] pub unsafe fn _mm_mask_i64gather_epi64( - src: i64x2, slice: *const i64, offsets: i64x2, mask: i64x2, scale: i8 + src: i64x2, slice: *const i64, offsets: i64x2, mask: i64x2, scale: i32 ) -> i64x2 { macro_rules! call { ($imm8:expr) => (pgatherqq(src, slice as *const i8, offsets, mask, $imm8)) @@ -1116,7 +1116,7 @@ pub unsafe fn _mm_mask_i64gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqq, scale = 1))] pub unsafe fn _mm256_i64gather_epi64( - slice: *const i64, offsets: i64x4, scale: i8 + slice: *const i64, offsets: i64x4, scale: i32 ) -> i64x4 { macro_rules! call { ($imm8:expr) => (vpgatherqq(i64x4::splat(0), slice as *const i8, offsets, i64x4::splat(-1), $imm8)) @@ -1132,7 +1132,7 @@ pub unsafe fn _mm256_i64gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vpgatherqq, scale = 1))] pub unsafe fn _mm256_mask_i64gather_epi64( - src: i64x4, slice: *const i64, offsets: i64x4, mask: i64x4, scale: i8 + src: i64x4, slice: *const i64, offsets: i64x4, mask: i64x4, scale: i32 ) -> i64x4 { macro_rules! call { ($imm8:expr) => (vpgatherqq(src, slice as *const i8, offsets, mask, $imm8)) @@ -1147,7 +1147,7 @@ pub unsafe fn _mm256_mask_i64gather_epi64( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqpd, scale = 1))] pub unsafe fn _mm_i64gather_pd( - slice: *const f64, offsets: i64x2, scale: i8 + slice: *const f64, offsets: i64x2, scale: i32 ) -> f64x2 { macro_rules! 
call { ($imm8:expr) => (pgatherqpd(f64x2::splat(0.0), slice as *const i8, offsets, f64x2::splat(-1.0), $imm8)) @@ -1163,7 +1163,7 @@ pub unsafe fn _mm_i64gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqpd, scale = 1))] pub unsafe fn _mm_mask_i64gather_pd( - src: f64x2, slice: *const f64, offsets: i64x2, mask: f64x2, scale: i8 + src: f64x2, slice: *const f64, offsets: i64x2, mask: f64x2, scale: i32 ) -> f64x2 { macro_rules! call { ($imm8:expr) => (pgatherqpd(src, slice as *const i8, offsets, mask, $imm8)) @@ -1178,7 +1178,7 @@ pub unsafe fn _mm_mask_i64gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqpd, scale = 1))] pub unsafe fn _mm256_i64gather_pd( - slice: *const f64, offsets: i64x4, scale: i8 + slice: *const f64, offsets: i64x4, scale: i32 ) -> f64x4 { macro_rules! call { ($imm8:expr) => (vpgatherqpd(f64x4::splat(0.0), slice as *const i8, offsets, f64x4::splat(-1.0), $imm8)) @@ -1194,7 +1194,7 @@ pub unsafe fn _mm256_i64gather_pd( #[target_feature = "+avx2"] #[cfg_attr(test, assert_instr(vgatherqpd, scale = 1))] pub unsafe fn _mm256_mask_i64gather_pd( - src: f64x4, slice: *const f64, offsets: i64x4, mask: f64x4, scale: i8 + src: f64x4, slice: *const f64, offsets: i64x4, mask: f64x4, scale: i32 ) -> f64x4 { macro_rules! call { ($imm8:expr) => (vpgatherqpd(src, slice as *const i8, offsets, mask, $imm8)) @@ -2656,6 +2656,48 @@ pub unsafe fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i { __m256i::from(i8x32::from(a) ^ i8x32::from(b)) } +/// Extract an 8-bit integer from `a`, selected with `imm8`. Returns a 32-bit +/// integer containing the zero-extended integer data. +/// +/// See [LLVM commit D20468][https://reviews.llvm.org/D20468]. +#[inline(always)] +#[target_feature = "+avx2"] +// This intrinsic has no corresponding instruction. 
+pub unsafe fn _mm256_extract_epi8(a: i8x32, imm8: i32) -> i8 { + let imm8 = (imm8 & 31) as u32; + a.extract_unchecked(imm8) +} + +/// Extract a 16-bit integer from `a`, selected with `imm8`. Returns the +/// extracted 16-bit lane as an `i16`. +/// +/// See [LLVM commit D20468][https://reviews.llvm.org/D20468]. +#[inline(always)] +#[target_feature = "+avx2"] +// This intrinsic has no corresponding instruction. +pub unsafe fn _mm256_extract_epi16(a: i16x16, imm8: i32) -> i16 { + let imm8 = (imm8 & 15) as u32; + a.extract_unchecked(imm8) +} + +/// Extract a 32-bit integer from `a`, selected with `imm8`. +#[inline(always)] +#[target_feature = "+avx2"] +// This intrinsic has no corresponding instruction. +pub unsafe fn _mm256_extract_epi32(a: i32x8, imm8: i32) -> i32 { + let imm8 = (imm8 & 7) as u32; + a.extract_unchecked(imm8) +} + +/// Extract a 64-bit integer from `a`, selected with `imm8`. +#[inline(always)] +#[target_feature = "+avx2"] +// This intrinsic has no corresponding instruction. 
+pub unsafe fn _mm256_extract_epi64(a: i64x4, imm8: i32) -> i64 { + let imm8 = (imm8 & 3) as u32; + a.extract_unchecked(imm8) +} + #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.avx2.pabs.b"] fn pabsb(a: u8x32) -> u8x32; @@ -4923,4 +4965,44 @@ mod tests { assert_eq!(r, f64x4::new(0.0, 16.0, 64.0, 256.0)); } + #[simd_test = "avx2"] + unsafe fn _mm256_extract_epi8() { + #[cfg_attr(rustfmt, rustfmt_skip)] + let a = i8x32::new( + -1, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ); + let r1 = avx2::_mm256_extract_epi8(a, 0); + let r2 = avx2::_mm256_extract_epi8(a, 35); + assert_eq!(r1, -1); + assert_eq!(r2, 3); + } + + #[simd_test = "avx2"] + unsafe fn _mm256_extract_epi16() { + let a = + i16x16::new(-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let r1 = avx2::_mm256_extract_epi16(a, 0); + let r2 = avx2::_mm256_extract_epi16(a, 19); + assert_eq!(r1, -1); + assert_eq!(r2, 3); + } + + #[simd_test = "avx2"] + unsafe fn _mm256_extract_epi32() { + let a = i32x8::new(-1, 1, 2, 3, 4, 5, 6, 7); + let r1 = avx2::_mm256_extract_epi32(a, 0); + let r2 = avx2::_mm256_extract_epi32(a, 11); + assert_eq!(r1, -1); + assert_eq!(r2, 3); + } + + #[simd_test = "avx2"] + unsafe fn _mm256_extract_epi64() { + let a = i64x4::new(0, 1, 2, 3); + let r = avx2::_mm256_extract_epi64(a, 3); + assert_eq!(r, 3); + } } diff --git a/coresimd/src/x86/i586/bmi.rs b/coresimd/src/x86/i586/bmi.rs index 5f00a7c67f..f51a6d2f24 100644 --- a/coresimd/src/x86/i586/bmi.rs +++ b/coresimd/src/x86/i586/bmi.rs @@ -27,8 +27,8 @@ pub unsafe fn _bextr_u32(a: u32, start: u32, len: u32) -> u32 { #[target_feature = "+bmi"] #[cfg_attr(test, assert_instr(bextr))] #[cfg(not(target_arch = "x86"))] -pub unsafe fn _bextr_u64(a: u64, start: u64, len: u64) -> u64 { - _bextr2_u64(a, (start & 0xff_u64) | ((len & 0xff_u64) << 8_u64)) +pub unsafe fn _bextr_u64(a: u64, start: u32, len: u32) -> u64 { + _bextr2_u64(a, ((start & 0xff) | ((len & 0xff) << 8)) as u64) }
/// Extracts bits of `a` specified by `control` into @@ -127,16 +127,6 @@ pub unsafe fn _blsr_u64(x: u64) -> u64 { x & (x.wrapping_sub(1)) } -/// Counts the number of trailing least significant zero bits. -/// -/// When the source operand is 0, it returns its size in bits. -#[inline(always)] -#[target_feature = "+bmi"] -#[cfg_attr(test, assert_instr(tzcnt))] -pub unsafe fn _tzcnt_u16(x: u16) -> u16 { - x.trailing_zeros() as u16 -} - /// Counts the number of trailing least significant zero bits. /// /// When the source operand is 0, it returns its size in bits. @@ -163,8 +153,8 @@ pub unsafe fn _tzcnt_u64(x: u64) -> u64 { #[inline(always)] #[target_feature = "+bmi"] #[cfg_attr(test, assert_instr(tzcnt))] -pub unsafe fn _mm_tzcnt_u32(x: u32) -> u32 { - x.trailing_zeros() +pub unsafe fn _mm_tzcnt_32(x: u32) -> i32 { + x.trailing_zeros() as i32 } /// Counts the number of trailing least significant zero bits. @@ -173,8 +163,8 @@ pub unsafe fn _mm_tzcnt_u32(x: u32) -> u32 { #[inline(always)] #[target_feature = "+bmi"] #[cfg_attr(test, assert_instr(tzcnt))] -pub unsafe fn _mm_tzcnt_u64(x: u64) -> u64 { - x.trailing_zeros() as u64 +pub unsafe fn _mm_tzcnt_64(x: u64) -> i64 { + x.trailing_zeros() as i64 } #[allow(dead_code)] @@ -290,13 +280,6 @@ mod tests { assert_eq!(r, 0b0010_0000u64); } - #[simd_test = "bmi"] - unsafe fn _tzcnt_u16() { - assert_eq!(bmi::_tzcnt_u16(0b0000_0001u16), 0u16); - assert_eq!(bmi::_tzcnt_u16(0b0000_0000u16), 16u16); - assert_eq!(bmi::_tzcnt_u16(0b1001_0000u16), 4u16); - } - #[simd_test = "bmi"] unsafe fn _tzcnt_u32() { assert_eq!(bmi::_tzcnt_u32(0b0000_0001u32), 0u32); diff --git a/coresimd/src/x86/i586/bmi2.rs b/coresimd/src/x86/i586/bmi2.rs index f32778063a..3eebc4114b 100644 --- a/coresimd/src/x86/i586/bmi2.rs +++ b/coresimd/src/x86/i586/bmi2.rs @@ -22,10 +22,10 @@ use stdsimd_test::assert_instr; #[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(imul))] #[cfg_attr(all(test, target_arch = "x86"), assert_instr(mulx))] #[target_feature = 
"+bmi2"] -pub unsafe fn _mulx_u32(a: u32, b: u32) -> (u32, u32) { +pub unsafe fn _mulx_u32(a: u32, b: u32, hi: &mut u32) -> u32 { let result: u64 = (a as u64) * (b as u64); - let hi = (result >> 32) as u32; - (result as u32, hi) + *hi = (result >> 32) as u32; + result as u32 } /// Unsigned multiply without affecting flags. @@ -36,10 +36,10 @@ pub unsafe fn _mulx_u32(a: u32, b: u32) -> (u32, u32) { #[cfg_attr(test, assert_instr(mulx))] #[target_feature = "+bmi2"] #[cfg(not(target_arch = "x86"))] // calls an intrinsic -pub unsafe fn _mulx_u64(a: u64, b: u64) -> (u64, u64) { +pub unsafe fn _mulx_u64(a: u64, b: u64, hi: &mut u64) -> u64 { let result: u128 = (a as u128) * (b as u128); - let hi = (result >> 64) as u64; - (result as u64, hi) + *hi = (result >> 64) as u64; + result as u64 } /// Zero higher bits of `a` >= `index`. @@ -55,8 +55,8 @@ pub unsafe fn _bzhi_u32(a: u32, index: u32) -> u32 { #[target_feature = "+bmi2"] #[cfg_attr(test, assert_instr(bzhi))] #[cfg(not(target_arch = "x86"))] -pub unsafe fn _bzhi_u64(a: u64, index: u64) -> u64 { - x86_bmi2_bzhi_64(a, index) +pub unsafe fn _bzhi_u64(a: u64, index: u32) -> u64 { + x86_bmi2_bzhi_64(a, index as u64) } /// Scatter contiguous low order bits of `a` to the result at the positions @@ -196,7 +196,8 @@ mod tests { unsafe fn _mulx_u32() { let a: u32 = 4_294_967_200; let b: u32 = 2; - let (lo, hi): (u32, u32) = bmi2::_mulx_u32(a, b); + let mut hi = 0; + let lo = bmi2::_mulx_u32(a, b, &mut hi); /* result = 8589934400 = 0b0001_1111_1111_1111_1111_1111_1111_0100_0000u64 @@ -212,7 +213,8 @@ result = 8589934400 unsafe fn _mulx_u64() { let a: u64 = 9_223_372_036_854_775_800; let b: u64 = 100; - let (lo, hi): (u64, u64) = bmi2::_mulx_u64(a, b); + let mut hi = 0; + let lo = bmi2::_mulx_u64(a, b, &mut hi); /* result = 922337203685477580000 = 0b00110001_1111111111111111_1111111111111111_1111111111111111_1111110011100000 diff --git a/coresimd/src/x86/i586/sse.rs b/coresimd/src/x86/i586/sse.rs index 45f2d0ef70..35e231d621 
100644 --- a/coresimd/src/x86/i586/sse.rs +++ b/coresimd/src/x86/i586/sse.rs @@ -5,7 +5,7 @@ use core::ptr; use simd_llvm::simd_shuffle4; use v128::*; -use v64::f32x2; +use v64::*; #[cfg(test)] use stdsimd_test::assert_instr; @@ -764,7 +764,7 @@ pub unsafe fn _mm_setzero_ps() -> f32x4 { #[inline(always)] #[target_feature = "+sse"] #[cfg_attr(test, assert_instr(shufps, mask = 3))] -pub unsafe fn _mm_shuffle_ps(a: f32x4, b: f32x4, mask: i32) -> f32x4 { +pub unsafe fn _mm_shuffle_ps(a: f32x4, b: f32x4, mask: u32) -> f32x4 { let mask = (mask & 0xFF) as u8; macro_rules! shuffle_done { @@ -884,7 +884,7 @@ pub unsafe fn _mm_movemask_ps(a: f32x4) -> i32 { /// let a = f32x4::new(1.0, 2.0, 3.0, 4.0); /// let data: [f32; 4] = [5.0, 6.0, 7.0, 8.0]; /// -/// let r = unsafe { _mm_loadh_pi(a, data[..].as_ptr()) }; +/// let r = unsafe { _mm_loadh_pi(a, data[..].as_ptr() as *const _) }; /// /// assert_eq!(r, f32x4::new(1.0, 2.0, 5.0, 6.0)); /// # @@ -906,7 +906,7 @@ pub unsafe fn _mm_movemask_ps(a: f32x4) -> i32 { assert_instr(unpcklps))] // TODO: This function is actually not limited to floats, but that's what // what matches the C type most closely: (__m128, *const __m64) -> __m128 -pub unsafe fn _mm_loadh_pi(a: f32x4, p: *const f32) -> f32x4 { +pub unsafe fn _mm_loadh_pi(a: f32x4, p: *const __m64) -> f32x4 { let q = p as *const f32x2; let b: f32x2 = *q; let bb = simd_shuffle4(b, b, [0, 1, 0, 1]); @@ -936,7 +936,7 @@ pub unsafe fn _mm_loadh_pi(a: f32x4, p: *const f32) -> f32x4 { /// let a = f32x4::new(1.0, 2.0, 3.0, 4.0); /// let data: [f32; 4] = [5.0, 6.0, 7.0, 8.0]; /// -/// let r = unsafe { _mm_loadl_pi(a, data[..].as_ptr()) }; +/// let r = unsafe { _mm_loadl_pi(a, data[..].as_ptr() as *const _) }; /// /// assert_eq!(r, f32x4::new(5.0, 6.0, 3.0, 4.0)); /// # @@ -957,7 +957,7 @@ pub unsafe fn _mm_loadh_pi(a: f32x4, p: *const f32) -> f32x4 { #[cfg_attr(all(test, target_arch = "x86", not(target_feature = "sse2")), assert_instr(movss))] // TODO: Like _mm_loadh_pi, this also isn't 
limited to floats. -pub unsafe fn _mm_loadl_pi(a: f32x4, p: *const f32) -> f32x4 { +pub unsafe fn _mm_loadl_pi(a: f32x4, p: *const __m64) -> f32x4 { let q = p as *const f32x2; let b: f32x2 = *q; let bb = simd_shuffle4(b, b, [0, 1, 0, 1]); @@ -1070,14 +1070,14 @@ pub unsafe fn _mm_loadr_ps(p: *const f32) -> f32x4 { // On i586 (no SSE2) it just generates plain MOV instructions. #[cfg_attr(all(test, any(target_arch = "x86_64", target_feature = "sse2")), assert_instr(movhpd))] -pub unsafe fn _mm_storeh_pi(p: *mut u64, a: f32x4) { +pub unsafe fn _mm_storeh_pi(p: *mut __m64, a: f32x4) { #[cfg(target_arch = "x86")] { // If this is a `f64x2` then on i586, LLVM generates fldl & fstpl which // is just silly let a64: u64x2 = mem::transmute(a); let a_hi = a64.extract(1); - *p = a_hi; + *(p as *mut u64) = a_hi; } #[cfg(target_arch = "x86_64")] { @@ -1103,14 +1103,14 @@ pub unsafe fn _mm_storeh_pi(p: *mut u64, a: f32x4) { #[cfg_attr(all(test, any(target_arch = "x86_64", target_feature = "sse2"), target_family = "windows"), assert_instr(movsd))] -pub unsafe fn _mm_storel_pi(p: *mut u64, a: f32x4) { +pub unsafe fn _mm_storel_pi(p: *mut __m64, a: f32x4) { #[cfg(target_arch = "x86")] { // Same as for _mm_storeh_pi: i586 code gen would use floating point // stack. let a64: u64x2 = mem::transmute(a); let a_hi = a64.extract(0); - *p = a_hi; + *(p as *mut u64) = a_hi; } #[cfg(target_arch = "x86_64")] { @@ -1671,6 +1671,8 @@ extern "C" { fn prefetch(p: *const u8, rw: i32, loc: i32, ty: i32); #[link_name = "llvm.x86.sse.cmp.ss"] fn cmpss(a: f32x4, b: f32x4, imm8: i8) -> f32x4; + #[link_name = "llvm.x86.mmx.movnt.dq"] + fn movntdq(a: *mut __m64, b: __m64); } /// Stores `a` into the memory at `mem_addr` using a non-temporal memory hint. @@ -1687,14 +1689,10 @@ pub unsafe fn _mm_stream_ps(mem_addr: *mut f32, a: f32x4) { /// Store 64-bits of integer data from a into memory using a non-temporal /// memory hint. 
#[inline(always)] -#[target_feature = "+sse"] -// generates movnti on i686 and x86_64 but just a mov on i586 -#[cfg_attr(all(test, - any(target_arch = "x86_64", - all(target_arch = "x86", target_feature = "sse2"))), - assert_instr(movnti))] -pub unsafe fn _mm_stream_pi(mem_addr: *mut i64, a: i64) { - ::core::intrinsics::nontemporal_store(mem_addr, a); +#[target_feature = "+sse,+mmx"] +#[cfg_attr(test, assert_instr(movntq))] +pub unsafe fn _mm_stream_pi(mem_addr: *mut __m64, a: __m64) { + movntdq(mem_addr, a) } #[cfg(test)] @@ -2967,20 +2965,22 @@ mod tests { } #[simd_test = "sse"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_loadh_pi() { let a = f32x4::new(1.0, 2.0, 3.0, 4.0); let x: [f32; 4] = [5.0, 6.0, 7.0, 8.0]; let p = x[..].as_ptr(); - let r = sse::_mm_loadh_pi(a, p); + let r = sse::_mm_loadh_pi(a, p as *const _); assert_eq!(r, f32x4::new(1.0, 2.0, 5.0, 6.0)); } #[simd_test = "sse"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_loadl_pi() { let a = f32x4::new(1.0, 2.0, 3.0, 4.0); let x: [f32; 4] = [5.0, 6.0, 7.0, 8.0]; let p = x[..].as_ptr(); - let r = sse::_mm_loadl_pi(a, p); + let r = sse::_mm_loadl_pi(a, p as *const _); assert_eq!(r, f32x4::new(5.0, 6.0, 3.0, 4.0)); } @@ -3049,10 +3049,11 @@ mod tests { } #[simd_test = "sse"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_storeh_pi() { let mut vals = [0.0f32; 8]; let a = f32x4::new(1.0, 2.0, 3.0, 4.0); - sse::_mm_storeh_pi(vals.as_mut_ptr() as *mut f32 as *mut u64, a); + sse::_mm_storeh_pi(vals.as_mut_ptr() as *mut _, a); assert_eq!(vals[0], 3.0); assert_eq!(vals[1], 4.0); @@ -3060,10 +3061,11 @@ mod tests { } #[simd_test = "sse"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_storel_pi() { let mut vals = [0.0f32; 8]; let a = f32x4::new(1.0, 2.0, 3.0, 4.0); - sse::_mm_storel_pi(vals.as_mut_ptr() as *mut f32 as *mut u64, a); + sse::_mm_storel_pi(vals.as_mut_ptr() as 
*mut _, a); assert_eq!(vals[0], 1.0); assert_eq!(vals[1], 2.0); @@ -3295,11 +3297,15 @@ mod tests { } } - #[simd_test = "sse"] + #[simd_test = "sse,mmx"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_stream_pi() { - let a: i64 = 7; - let mut mem = ::std::boxed::Box::::new(-1); - sse::_mm_stream_pi(&mut *mem as *mut i64, a); + use std::mem; + use v64::*; + + let a = mem::transmute(i8x8::new(0, 0, 0, 0, 0, 0, 0, 7)); + let mut mem = ::std::boxed::Box::<__m64>::new(mem::transmute(i8x8::splat(1))); + sse::_mm_stream_pi(&mut *mem as *mut _ as *mut _, a); assert_eq!(a, *mem); } } diff --git a/coresimd/src/x86/i586/sse2.rs b/coresimd/src/x86/i586/sse2.rs index c4560cd186..9fd5940514 100644 --- a/coresimd/src/x86/i586/sse2.rs +++ b/coresimd/src/x86/i586/sse2.rs @@ -2933,9 +2933,9 @@ mod tests { let b = i8x16::new(15, 14, 2, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = sse2::_mm_cmpeq_epi8(a, b); + #[cfg_attr(rustfmt, rustfmt_skip)] assert_eq!( r, - #[cfg_attr(rustfmt, rustfmt_skip)] i8x16::new( 0, 0, 0xFFu8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) @@ -3219,9 +3219,9 @@ mod tests { let a = i16x8::new(0x80, -0x81, 0, 0, 0, 0, 0, 0); let b = i16x8::new(0, 0, 0, 0, 0, 0, -0x81, 0x80); let r = sse2::_mm_packs_epi16(a, b); + #[cfg_attr(rustfmt, rustfmt_skip)] assert_eq!( r, - #[cfg_attr(rustfmt, rustfmt_skip)] i8x16::new( 0x7F, -0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0x80, 0x7F ) @@ -3268,7 +3268,8 @@ mod tests { #[simd_test = "sse2"] unsafe fn _mm_movemask_epi8() { - let a = i8x16::from(#[cfg_attr(rustfmt, rustfmt_skip)] u8x16::new( + #[cfg_attr(rustfmt, rustfmt_skip)] + let a = i8x16::from(u8x16::new( 0b1000_0000, 0b0, 0b1000_0000, 0b01, 0b0101, 0b1111_0000, 0, 0, 0, 0, 0b1111_0000, 0b0101, 0b01, 0b1000_0000, 0b0, 0b1000_0000, )); let r = sse2::_mm_movemask_epi8(a); diff --git a/coresimd/src/x86/i586/sse41.rs b/coresimd/src/x86/i586/sse41.rs index d3fb5135bf..60f972f4d0 100644 --- a/coresimd/src/x86/i586/sse41.rs +++ 
b/coresimd/src/x86/i586/sse41.rs @@ -61,7 +61,7 @@ pub unsafe fn _mm_blendv_epi8(a: i8x16, b: i8x16, mask: i8x16) -> i8x16 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(pblendw, imm8 = 0xF0))] -pub unsafe fn _mm_blend_epi16(a: i16x8, b: i16x8, imm8: u8) -> i16x8 { +pub unsafe fn _mm_blend_epi16(a: i16x8, b: i16x8, imm8: i32) -> i16x8 { macro_rules! call { ($imm8:expr) => { pblendw(a, b, $imm8) } } @@ -91,7 +91,7 @@ pub unsafe fn _mm_blendv_ps(a: f32x4, b: f32x4, mask: f32x4) -> f32x4 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(blendpd, imm2 = 0b10))] -pub unsafe fn _mm_blend_pd(a: f64x2, b: f64x2, imm2: u8) -> f64x2 { +pub unsafe fn _mm_blend_pd(a: f64x2, b: f64x2, imm2: i32) -> f64x2 { macro_rules! call { ($imm2:expr) => { blendpd(a, b, $imm2) } } @@ -103,7 +103,7 @@ pub unsafe fn _mm_blend_pd(a: f64x2, b: f64x2, imm2: u8) -> f64x2 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(blendps, imm4 = 0b0101))] -pub unsafe fn _mm_blend_ps(a: f32x4, b: f32x4, imm4: u8) -> f32x4 { +pub unsafe fn _mm_blend_ps(a: f32x4, b: f32x4, imm4: i32) -> f32x4 { macro_rules! call { ($imm4:expr) => { blendps(a, b, $imm4) } } @@ -116,7 +116,7 @@ pub unsafe fn _mm_blend_ps(a: f32x4, b: f32x4, imm4: u8) -> f32x4 { #[target_feature = "+sse4.1"] // TODO: Add test for Windows #[cfg_attr(all(test, not(windows)), assert_instr(extractps, imm8 = 0))] -pub unsafe fn _mm_extract_ps(a: f32x4, imm8: u8) -> i32 { +pub unsafe fn _mm_extract_ps(a: f32x4, imm8: i32) -> i32 { mem::transmute(a.extract(imm8 as u32 & 0b11)) } @@ -167,7 +167,7 @@ pub unsafe fn _mm_extract_epi32(a: i32x4, imm8: i32) -> i32 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(insertps, imm8 = 0b1010))] -pub unsafe fn _mm_insert_ps(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { +pub unsafe fn _mm_insert_ps(a: f32x4, b: f32x4, imm8: i32) -> f32x4 { macro_rules! 
call { ($imm8:expr) => { insertps(a, b, $imm8) } } @@ -179,7 +179,7 @@ pub unsafe fn _mm_insert_ps(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(pinsrb, imm8 = 0))] -pub unsafe fn _mm_insert_epi8(a: i8x16, i: i8, imm8: u8) -> i8x16 { +pub unsafe fn _mm_insert_epi8(a: i8x16, i: i8, imm8: i32) -> i8x16 { a.replace((imm8 & 0b1111) as u32, i) } @@ -188,7 +188,7 @@ pub unsafe fn _mm_insert_epi8(a: i8x16, i: i8, imm8: u8) -> i8x16 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(pinsrd, imm8 = 0))] -pub unsafe fn _mm_insert_epi32(a: i32x4, i: i32, imm8: u8) -> i32x4 { +pub unsafe fn _mm_insert_epi32(a: i32x4, i: i32, imm8: i32) -> i32x4 { a.replace((imm8 & 0b11) as u32, i) } @@ -391,7 +391,7 @@ pub unsafe fn _mm_cvtepu32_epi64(a: u32x4) -> i64x2 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(dppd, imm8 = 0))] -pub unsafe fn _mm_dp_pd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { +pub unsafe fn _mm_dp_pd(a: f64x2, b: f64x2, imm8: i32) -> f64x2 { macro_rules! call { ($imm8:expr) => { dppd(a, b, $imm8) } } @@ -408,7 +408,7 @@ pub unsafe fn _mm_dp_pd(a: f64x2, b: f64x2, imm8: u8) -> f64x2 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(dpps, imm8 = 0))] -pub unsafe fn _mm_dp_ps(a: f32x4, b: f32x4, imm8: u8) -> f32x4 { +pub unsafe fn _mm_dp_ps(a: f32x4, b: f32x4, imm8: i32) -> f32x4 { macro_rules! call { ($imm8:expr) => { dpps(a, b, $imm8) } } @@ -705,7 +705,7 @@ pub unsafe fn _mm_mullo_epi32(a: i32x4, b: i32x4) -> i32x4 { #[inline(always)] #[target_feature = "+sse4.1"] #[cfg_attr(test, assert_instr(mpsadbw, imm8 = 0))] -pub unsafe fn _mm_mpsadbw_epu8(a: u8x16, b: u8x16, imm8: u8) -> u16x8 { +pub unsafe fn _mm_mpsadbw_epu8(a: u8x16, b: u8x16, imm8: i32) -> u16x8 { macro_rules! 
call { ($imm8:expr) => { mpsadbw(a, b, $imm8) } } diff --git a/coresimd/src/x86/i586/sse42.rs b/coresimd/src/x86/i586/sse42.rs index caf2e36267..2e9b7dec38 100644 --- a/coresimd/src/x86/i586/sse42.rs +++ b/coresimd/src/x86/i586/sse42.rs @@ -8,49 +8,49 @@ use stdsimd_test::assert_instr; use v128::*; /// String contains unsigned 8-bit characters *(Default)* -pub const _SIDD_UBYTE_OPS: i8 = 0b0000_0000; +pub const _SIDD_UBYTE_OPS: i32 = 0b0000_0000; /// String contains unsigned 16-bit characters -pub const _SIDD_UWORD_OPS: i8 = 0b0000_0001; +pub const _SIDD_UWORD_OPS: i32 = 0b0000_0001; /// String contains signed 8-bit characters -pub const _SIDD_SBYTE_OPS: i8 = 0b0000_0010; +pub const _SIDD_SBYTE_OPS: i32 = 0b0000_0010; /// String contains unsigned 16-bit characters -pub const _SIDD_SWORD_OPS: i8 = 0b0000_0011; +pub const _SIDD_SWORD_OPS: i32 = 0b0000_0011; /// For each character in `a`, find if it is in `b` *(Default)* -pub const _SIDD_CMP_EQUAL_ANY: i8 = 0b0000_0000; +pub const _SIDD_CMP_EQUAL_ANY: i32 = 0b0000_0000; /// For each character in `a`, determine if /// `b[0] <= c <= b[1] or b[1] <= c <= b[2]...` -pub const _SIDD_CMP_RANGES: i8 = 0b0000_0100; +pub const _SIDD_CMP_RANGES: i32 = 0b0000_0100; /// The strings defined by `a` and `b` are equal -pub const _SIDD_CMP_EQUAL_EACH: i8 = 0b0000_1000; +pub const _SIDD_CMP_EQUAL_EACH: i32 = 0b0000_1000; /// Search for the defined substring in the target -pub const _SIDD_CMP_EQUAL_ORDERED: i8 = 0b0000_1100; +pub const _SIDD_CMP_EQUAL_ORDERED: i32 = 0b0000_1100; /// Do not negate results *(Default)* -pub const _SIDD_POSITIVE_POLARITY: i8 = 0b0000_0000; +pub const _SIDD_POSITIVE_POLARITY: i32 = 0b0000_0000; /// Negate results -pub const _SIDD_NEGATIVE_POLARITY: i8 = 0b0001_0000; +pub const _SIDD_NEGATIVE_POLARITY: i32 = 0b0001_0000; /// Do not negate results before the end of the string -pub const _SIDD_MASKED_POSITIVE_POLARITY: i8 = 0b0010_0000; +pub const _SIDD_MASKED_POSITIVE_POLARITY: i32 = 0b0010_0000; /// Negate 
results only before the end of the string -pub const _SIDD_MASKED_NEGATIVE_POLARITY: i8 = 0b0011_0000; +pub const _SIDD_MASKED_NEGATIVE_POLARITY: i32 = 0b0011_0000; /// **Index only**: return the least significant bit *(Default)* -pub const _SIDD_LEAST_SIGNIFICANT: i8 = 0b0000_0000; +pub const _SIDD_LEAST_SIGNIFICANT: i32 = 0b0000_0000; /// **Index only**: return the most significant bit -pub const _SIDD_MOST_SIGNIFICANT: i8 = 0b0100_0000; +pub const _SIDD_MOST_SIGNIFICANT: i32 = 0b0100_0000; /// **Mask only**: return the bit mask -pub const _SIDD_BIT_MASK: i8 = 0b0000_0000; +pub const _SIDD_BIT_MASK: i32 = 0b0000_0000; /// **Mask only**: return the byte mask -pub const _SIDD_UNIT_MASK: i8 = 0b0100_0000; +pub const _SIDD_UNIT_MASK: i32 = 0b0100_0000; /// Compare packed strings with implicit lengths in `a` and `b` using the /// control in `imm8`, and return the generated mask. #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistrm, imm8 = 0))] -pub unsafe fn _mm_cmpistrm(a: __m128i, b: __m128i, imm8: i8) -> __m128i { +pub unsafe fn _mm_cmpistrm(a: __m128i, b: __m128i, imm8: i32) -> __m128i { macro_rules! call { ($imm8:expr) => { __m128i::from(pcmpistrm128(i8x16::from(a), i8x16::from(b), $imm8)) } } @@ -270,7 +270,7 @@ pub unsafe fn _mm_cmpistrm(a: __m128i, b: __m128i, imm8: i8) -> __m128i { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistri(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistri(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! 
call { ($imm8:expr) => { pcmpistri128(i8x16::from(a), i8x16::from(b), $imm8) } } @@ -283,7 +283,7 @@ pub unsafe fn _mm_cmpistri(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistrz(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistrz(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpistriz128(i8x16::from(a), i8x16::from(b), @@ -298,7 +298,7 @@ pub unsafe fn _mm_cmpistrz(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistrc(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistrc(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpistric128(i8x16::from(a), i8x16::from(b), $imm8) } } @@ -311,7 +311,7 @@ pub unsafe fn _mm_cmpistrc(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistrs(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistrs(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpistris128(i8x16::from(a), i8x16::from(b), $imm8) } } @@ -323,7 +323,7 @@ pub unsafe fn _mm_cmpistrs(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistro(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistro(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! 
call { ($imm8:expr) => { pcmpistrio128(i8x16::from(a), i8x16::from(b), $imm8) } } @@ -336,7 +336,7 @@ pub unsafe fn _mm_cmpistro(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[inline(always)] #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpistri, imm8 = 0))] -pub unsafe fn _mm_cmpistra(a: __m128i, b: __m128i, imm8: i8) -> i32 { +pub unsafe fn _mm_cmpistra(a: __m128i, b: __m128i, imm8: i32) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpistria128(i8x16::from(a), i8x16::from(b), $imm8) } } @@ -349,7 +349,7 @@ pub unsafe fn _mm_cmpistra(a: __m128i, b: __m128i, imm8: i8) -> i32 { #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestrm, imm8 = 0))] pub unsafe fn _mm_cmpestrm( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> __m128i { macro_rules! call { ($imm8:expr) => { __m128i::from(pcmpestrm128(i8x16::from(a), la, @@ -445,7 +445,7 @@ pub unsafe fn _mm_cmpestrm( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestri( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpestri128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } @@ -460,7 +460,7 @@ pub unsafe fn _mm_cmpestri( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestrz( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpestriz128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } @@ -475,7 +475,7 @@ pub unsafe fn _mm_cmpestrz( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestrc( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! 
call { ($imm8:expr) => { pcmpestric128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } @@ -490,7 +490,7 @@ pub unsafe fn _mm_cmpestrc( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestrs( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpestris128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } @@ -505,7 +505,7 @@ pub unsafe fn _mm_cmpestrs( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestro( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! call { ($imm8:expr) => { pcmpestrio128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } @@ -521,7 +521,7 @@ pub unsafe fn _mm_cmpestro( #[target_feature = "+sse4.2"] #[cfg_attr(test, assert_instr(pcmpestri, imm8 = 0))] pub unsafe fn _mm_cmpestra( - a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i8 + a: __m128i, la: i32, b: __m128i, lb: i32, imm8: i32 ) -> i32 { macro_rules! 
call { ($imm8:expr) => { pcmpestria128(i8x16::from(a), la, i8x16::from(b), lb, $imm8) } diff --git a/coresimd/src/x86/i586/xsave.rs b/coresimd/src/x86/i586/xsave.rs index 8317c3ebd1..15e1f8b8ad 100644 --- a/coresimd/src/x86/i586/xsave.rs +++ b/coresimd/src/x86/i586/xsave.rs @@ -36,7 +36,7 @@ extern "C" { #[inline(always)] #[target_feature = "+xsave"] #[cfg_attr(test, assert_instr(xsave))] -pub unsafe fn _xsave(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsave(mem_addr: *mut u8, save_mask: u64) { xsave(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -49,7 +49,7 @@ pub unsafe fn _xsave(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave"] #[cfg_attr(test, assert_instr(xrstor))] -pub unsafe fn _xrstor(mem_addr: *const u8, rs_mask: u64) -> () { +pub unsafe fn _xrstor(mem_addr: *const u8, rs_mask: u64) { xrstor(mem_addr, (rs_mask >> 32) as u32, rs_mask as u32); } @@ -65,7 +65,7 @@ const _XCR_XFEATURE_ENABLED_MASK: u32 = 0; #[inline(always)] #[target_feature = "+xsave"] #[cfg_attr(test, assert_instr(xsetbv))] -pub unsafe fn _xsetbv(a: u32, val: u64) -> () { +pub unsafe fn _xsetbv(a: u32, val: u64) { xsetbv(a, (val >> 32) as u32, val as u32); } @@ -88,7 +88,7 @@ pub unsafe fn _xgetbv(xcr_no: u32) -> u64 { #[inline(always)] #[target_feature = "+xsave,+xsaveopt"] #[cfg_attr(test, assert_instr(xsaveopt))] -pub unsafe fn _xsaveopt(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsaveopt(mem_addr: *mut u8, save_mask: u64) { xsaveopt(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -101,7 +101,7 @@ pub unsafe fn _xsaveopt(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsavec"] #[cfg_attr(test, assert_instr(xsavec))] -pub unsafe fn _xsavec(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsavec(mem_addr: *mut u8, save_mask: u64) { xsavec(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -115,7 +115,7 @@ pub unsafe fn _xsavec(mem_addr: 
*mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsaves"] #[cfg_attr(test, assert_instr(xsaves))] -pub unsafe fn _xsaves(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsaves(mem_addr: *mut u8, save_mask: u64) { xsaves(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -131,7 +131,7 @@ pub unsafe fn _xsaves(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsaves"] #[cfg_attr(test, assert_instr(xrstors))] -pub unsafe fn _xrstors(mem_addr: *const u8, rs_mask: u64) -> () { +pub unsafe fn _xrstors(mem_addr: *const u8, rs_mask: u64) { xrstors(mem_addr, (rs_mask >> 32) as u32, rs_mask as u32); } diff --git a/coresimd/src/x86/i686/mmx.rs b/coresimd/src/x86/i686/mmx.rs index d4aa35b20a..df9cb98336 100644 --- a/coresimd/src/x86/i686/mmx.rs +++ b/coresimd/src/x86/i686/mmx.rs @@ -8,7 +8,7 @@ //! //! [intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf -use v64::{__m64, i16x4, i32x2, i8x8}; +use v64::*; use core::mem; #[cfg(test)] diff --git a/coresimd/src/x86/i686/sse.rs b/coresimd/src/x86/i686/sse.rs index a5bd78b285..7ddb6e7fec 100644 --- a/coresimd/src/x86/i686/sse.rs +++ b/coresimd/src/x86/i686/sse.rs @@ -1,7 +1,7 @@ //! 
`i686` Streaming SIMD Extensions (SSE) use v128::f32x4; -use v64::{__m64, i16x4, i32x2, i8x8, u16x4, u8x8}; +use v64::*; use core::mem; use x86::i586; use x86::i686::mmx; @@ -184,7 +184,7 @@ pub unsafe fn _m_pavgw(a: u16x4, b: u16x4) -> u16x4 { #[inline(always)] #[target_feature = "+sse"] #[cfg_attr(test, assert_instr(psadbw))] -pub unsafe fn _mm_sad_pu8(a: u8x8, b: u8x8) -> u64 { +pub unsafe fn _mm_sad_pu8(a: u8x8, b: u8x8) -> __m64 { mem::transmute(psadbw(mem::transmute(a), mem::transmute(b))) } @@ -195,8 +195,8 @@ pub unsafe fn _mm_sad_pu8(a: u8x8, b: u8x8) -> u64 { #[inline(always)] #[target_feature = "+sse"] #[cfg_attr(test, assert_instr(psadbw))] -pub unsafe fn _m_psadbw(a: u8x8, b: u8x8) -> u64 { - _mm_sad_pu8(a, b) +pub unsafe fn _m_psadbw(a: u8x8, b: u8x8) -> __m64 { + mem::transmute(_mm_sad_pu8(a, b)) } /// Converts two elements of a 64-bit vector of [2 x i32] into two @@ -330,7 +330,7 @@ pub unsafe fn _m_pmovmskb(a: i16x4) -> i32 { #[inline(always)] #[target_feature = "+sse"] #[cfg_attr(test, assert_instr(pshufw, imm8 = 0))] -pub unsafe fn _mm_shuffle_pi16(a: i16x4, imm8: i8) -> i16x4 { +pub unsafe fn _mm_shuffle_pi16(a: i16x4, imm8: i32) -> i16x4 { macro_rules! 
call { ($imm8:expr) => { mem::transmute(pshufw(mem::transmute(a), $imm8)) } } @@ -342,7 +342,7 @@ pub unsafe fn _mm_shuffle_pi16(a: i16x4, imm8: i8) -> i16x4 { #[inline(always)] #[target_feature = "+sse"] #[cfg_attr(test, assert_instr(pshufw, imm8 = 0))] -pub unsafe fn _m_pshufw(a: i16x4, imm8: i8) -> i16x4 { +pub unsafe fn _m_pshufw(a: i16x4, imm8: i32) -> i16x4 { _mm_shuffle_pi16(a, imm8) } @@ -408,6 +408,9 @@ pub unsafe fn _mm_cvtps_pi8(a: f32x4) -> i8x8 { #[cfg(test)] mod tests { + #[cfg(not(windows))] + use std::mem; + use v128::f32x4; use v64::{i16x4, i32x2, i8x8, u16x4, u8x8}; use x86::i686::sse; @@ -481,14 +484,15 @@ mod tests { } #[simd_test = "sse"] + #[cfg(not(windows))] // FIXME "unknown codeview register" in LLVM unsafe fn _mm_sad_pu8() { let a = u8x8::new(255, 254, 253, 252, 1, 2, 3, 4); let b = u8x8::new(0, 0, 0, 0, 2, 1, 2, 1); let r = sse::_mm_sad_pu8(a, b); - assert_eq!(r, 1020); + assert_eq!(r, mem::transmute(u16x4::new(1020, 0, 0, 0))); let r = sse::_m_psadbw(a, b); - assert_eq!(r, 1020); + assert_eq!(r, mem::transmute(u16x4::new(1020, 0, 0, 0))); } #[simd_test = "sse"] diff --git a/coresimd/src/x86/i686/sse2.rs b/coresimd/src/x86/i686/sse2.rs index c416655c05..1ea531b5eb 100644 --- a/coresimd/src/x86/i686/sse2.rs +++ b/coresimd/src/x86/i686/sse2.rs @@ -2,7 +2,7 @@ use core::mem; use v128::*; -use v64::{__m64, i32x2, u32x2}; +use v64::*; #[cfg(test)] use stdsimd_test::assert_instr; @@ -22,8 +22,8 @@ pub unsafe fn _mm_add_si64(a: __m64, b: __m64) -> __m64 { #[inline(always)] #[target_feature = "+sse2"] #[cfg_attr(test, assert_instr(pmuludq))] -pub unsafe fn _mm_mul_su32(a: u32x2, b: u32x2) -> u64 { - mem::transmute(pmuludq(mem::transmute(a), mem::transmute(b))) +pub unsafe fn _mm_mul_su32(a: u32x2, b: u32x2) -> __m64 { + pmuludq(mem::transmute(a), mem::transmute(b)) } /// Subtracts signed or unsigned 64-bit integer values and writes the @@ -102,8 +102,8 @@ pub unsafe fn _mm_cvtsi128_si64x(a: i64x2) -> i64 { #[inline(always)] #[target_feature = 
"+sse2"] // no particular instruction to test -pub unsafe fn _mm_set_epi64(e1: i64, e0: i64) -> i64x2 { - i64x2::new(e0, e1) +pub unsafe fn _mm_set_epi64(e1: __m64, e0: __m64) -> i64x2 { + i64x2::new(mem::transmute(e0), mem::transmute(e1)) } /// Initializes both values in a 128-bit vector of [2 x i64] with the @@ -111,8 +111,8 @@ pub unsafe fn _mm_set_epi64(e1: i64, e0: i64) -> i64x2 { #[inline(always)] #[target_feature = "+sse2"] // no particular instruction to test -pub unsafe fn _mm_set1_epi64(a: i64) -> i64x2 { - i64x2::new(a, a) +pub unsafe fn _mm_set1_epi64(a: __m64) -> i64x2 { + i64x2::new(mem::transmute(a), mem::transmute(a)) } /// Constructs a 128-bit integer vector, initialized in reverse order @@ -120,26 +120,26 @@ pub unsafe fn _mm_set1_epi64(a: i64) -> i64x2 { #[inline(always)] #[target_feature = "+sse2"] // no particular instruction to test -pub unsafe fn _mm_setr_epi64(e1: i64, e0: i64) -> i64x2 { - i64x2::new(e1, e0) +pub unsafe fn _mm_setr_epi64(e1: __m64, e0: __m64) -> i64x2 { + i64x2::new(mem::transmute(e1), mem::transmute(e0)) } /// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit /// integer. #[inline(always)] #[target_feature = "+sse2"] -// no particular instruction to test -pub unsafe fn _mm_movepi64_pi64(a: i64x2) -> i64 { - a.extract(0) +// #[cfg_attr(test, assert_instr(movdq2q))] // FIXME: llvm codegens wrong instr? +pub unsafe fn _mm_movepi64_pi64(a: i64x2) -> __m64 { + mem::transmute(a.extract(0)) } /// Moves the 64-bit operand to a 128-bit integer vector, zeroing the /// upper bits. #[inline(always)] #[target_feature = "+sse2"] -// #[cfg_attr(test, assert_instr(movq2dq))] FIXME -pub unsafe fn _mm_movpi64_epi64(a: i64) -> i64x2 { - i64x2::new(a, 0) +// #[cfg_attr(test, assert_instr(movq2dq))] // FIXME: llvm codegens wrong instr? 
+pub unsafe fn _mm_movpi64_epi64(a: __m64) -> i64x2 { + i64x2::new(mem::transmute(a), 0) } /// Converts the two double-precision floating-point elements of a @@ -182,15 +182,12 @@ extern "C" { #[cfg(test)] mod tests { + use std::mem; + use stdsimd_test::simd_test; - #[cfg(not(windows))] - use core::mem; use v128::*; - #[cfg(not(windows))] - use v64::{__m64, i32x2, u32x2}; - #[cfg(windows)] - use v64::i32x2; + use v64::*; use x86::i686::sse2; #[simd_test = "sse2"] @@ -210,7 +207,7 @@ mod tests { let b = u32x2::new(3, 4); let expected = 3u64; let r = sse2::_mm_mul_su32(a, b); - assert_eq!(r, expected); + assert_eq!(r, mem::transmute(expected)); } #[simd_test = "sse2"] @@ -252,31 +249,31 @@ mod tests { #[simd_test = "sse2"] unsafe fn _mm_set_epi64() { - let r = sse2::_mm_set_epi64(1, 2); + let r = sse2::_mm_set_epi64(mem::transmute(1i64), mem::transmute(2i64)); assert_eq!(r, i64x2::new(2, 1)); } #[simd_test = "sse2"] unsafe fn _mm_set1_epi64() { - let r = sse2::_mm_set1_epi64(1); + let r = sse2::_mm_set1_epi64(mem::transmute(1i64)); assert_eq!(r, i64x2::new(1, 1)); } #[simd_test = "sse2"] unsafe fn _mm_setr_epi64() { - let r = sse2::_mm_setr_epi64(1, 2); + let r = sse2::_mm_setr_epi64(mem::transmute(1i64), mem::transmute(2i64)); assert_eq!(r, i64x2::new(1, 2)); } #[simd_test = "sse2"] unsafe fn _mm_movepi64_pi64() { let r = sse2::_mm_movepi64_pi64(i64x2::new(5, 0)); - assert_eq!(r, 5); + assert_eq!(r, mem::transmute(i8x8::new(5, 0, 0, 0, 0, 0, 0, 0))); } #[simd_test = "sse2"] unsafe fn _mm_movpi64_epi64() { - let r = sse2::_mm_movpi64_epi64(5); + let r = sse2::_mm_movpi64_epi64(mem::transmute(i8x8::new(5, 0, 0, 0, 0, 0, 0, 0))); assert_eq!(r, i64x2::new(5, 0)); } diff --git a/coresimd/src/x86/i686/sse41.rs b/coresimd/src/x86/i686/sse41.rs index 28c2d78f7f..9a9810b916 100644 --- a/coresimd/src/x86/i686/sse41.rs +++ b/coresimd/src/x86/i686/sse41.rs @@ -15,31 +15,6 @@ extern "C" { fn ptestnzc(a: i64x2, mask: i64x2) -> i32; } -/// Extract an 64-bit integer from `a` 
selected with `imm8` -#[inline(always)] -#[target_feature = "+sse4.1"] -// TODO: Add test for Windows -#[cfg_attr(all(test, not(windows), target_arch = "x86_64"), - assert_instr(pextrq, imm8 = 1))] -// On x86 this emits 2 pextrd instructions -#[cfg_attr(all(test, not(windows), target_arch = "x86"), - assert_instr(pextrd, imm8 = 1))] -pub unsafe fn _mm_extract_epi64(a: i64x2, imm8: i32) -> i64 { - let imm8 = (imm8 & 1) as u32; - a.extract_unchecked(imm8) -} - -/// Return a copy of `a` with the 64-bit integer from `i` inserted at a -/// location specified by `imm8`. -#[inline(always)] -#[target_feature = "+sse4.1"] -#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(pinsrq, imm8 = 0))] -// On x86 this emits 2 pinsrd instructions -#[cfg_attr(all(test, target_arch = "x86"), assert_instr(pinsrd, imm8 = 0))] -pub unsafe fn _mm_insert_epi64(a: i64x2, i: i64, imm8: u8) -> i64x2 { - a.replace((imm8 & 0b1) as u32, i) -} - /// Tests whether the specified bits in a 128-bit integer vector are all /// zeros. 
/// @@ -165,25 +140,6 @@ mod tests { use x86::i686::sse41; use v128::*; - #[simd_test = "sse4.1"] - unsafe fn _mm_extract_epi64() { - let a = i64x2::new(0, 1); - let r = sse41::_mm_extract_epi64(a, 1); - assert_eq!(r, 1); - let r = sse41::_mm_extract_epi64(a, 3); - assert_eq!(r, 1); - } - - #[simd_test = "sse4.1"] - unsafe fn _mm_insert_epi64() { - let a = i64x2::splat(0); - let e = i64x2::splat(0).replace(1, 32); - let r = sse41::_mm_insert_epi64(a, 32, 1); - assert_eq!(r, e); - let r = sse41::_mm_insert_epi64(a, 32, 3); - assert_eq!(r, e); - } - #[simd_test = "sse4.1"] unsafe fn _mm_testz_si128() { let a = i8x16::splat(1); diff --git a/coresimd/src/x86/i686/ssse3.rs b/coresimd/src/x86/i686/ssse3.rs index d3eea79af2..e117ef441c 100644 --- a/coresimd/src/x86/i686/ssse3.rs +++ b/coresimd/src/x86/i686/ssse3.rs @@ -47,7 +47,7 @@ pub unsafe fn _mm_shuffle_pi8(a: u8x8, b: u8x8) -> u8x8 { #[inline(always)] #[target_feature = "+ssse3"] #[cfg_attr(test, assert_instr(palignr, n = 15))] -pub unsafe fn _mm_alignr_pi8(a: u8x8, b: u8x8, n: u8) -> u8x8 { +pub unsafe fn _mm_alignr_pi8(a: u8x8, b: u8x8, n: i32) -> u8x8 { macro_rules! call { ($imm8:expr) => { mem::transmute(palignrb(mem::transmute(a), mem::transmute(b), $imm8)) diff --git a/coresimd/src/x86/x86_64/mod.rs b/coresimd/src/x86/x86_64/mod.rs index b5456b71ee..7225e7bf05 100644 --- a/coresimd/src/x86/x86_64/mod.rs +++ b/coresimd/src/x86/x86_64/mod.rs @@ -9,6 +9,9 @@ pub use self::sse::*; mod sse2; pub use self::sse2::*; +mod sse41; +pub use self::sse41::*; + mod sse42; pub use self::sse42::*; diff --git a/coresimd/src/x86/x86_64/sse2.rs b/coresimd/src/x86/x86_64/sse2.rs index 25ab58c11d..b5c1fe4207 100644 --- a/coresimd/src/x86/x86_64/sse2.rs +++ b/coresimd/src/x86/x86_64/sse2.rs @@ -58,6 +58,40 @@ pub unsafe fn _mm_stream_si64(mem_addr: *mut i64, a: i64) { ::core::intrinsics::nontemporal_store(mem_addr, a); } +/// Return a vector whose lowest element is `a` and all higher elements are +/// `0`. 
+#[inline(always)] +#[target_feature = "+sse2"] +#[cfg_attr(all(test, not(windows)), assert_instr(movq))] +pub unsafe fn _mm_cvtsi64_si128(a: i64) -> i64x2 { + i64x2::new(a, 0) +} + +/// Return a vector whose lowest element is `a` and all higher elements are +/// `0`. +#[inline(always)] +#[target_feature = "+sse2"] +#[cfg_attr(all(test, not(windows)), assert_instr(movq))] +pub unsafe fn _mm_cvtsi64x_si128(a: i64) -> i64x2 { + _mm_cvtsi64_si128(a) +} + +/// Return the lowest element of `a`. +#[inline(always)] +#[target_feature = "+sse2"] +#[cfg_attr(all(test, not(windows)), assert_instr(movq))] +pub unsafe fn _mm_cvtsi128_si64(a: i64x2) -> i64 { + a.extract(0) +} + +/// Return the lowest element of `a`. +#[inline(always)] +#[target_feature = "+sse2"] +#[cfg_attr(all(test, not(windows)), assert_instr(movq))] +pub unsafe fn _mm_cvtsi128_si64x(a: i64x2) -> i64 { + _mm_cvtsi128_si64(a) +} + #[cfg(test)] mod tests { use stdsimd_test::simd_test; @@ -107,4 +141,16 @@ mod tests { sse2::_mm_stream_si64(&mut *mem as *mut i64, a); assert_eq!(a, *mem); } + + #[simd_test = "sse2"] + unsafe fn _mm_cvtsi64_si128() { + let r = sse2::_mm_cvtsi64_si128(5); + assert_eq!(r, i64x2::new(5, 0)); + } + + #[simd_test = "sse2"] + unsafe fn _mm_cvtsi128_si64() { + let r = sse2::_mm_cvtsi128_si64(i64x2::new(5, 0)); + assert_eq!(r, 5); + } } diff --git a/coresimd/src/x86/x86_64/sse41.rs b/coresimd/src/x86/x86_64/sse41.rs new file mode 100644 index 0000000000..b2417a8e59 --- /dev/null +++ b/coresimd/src/x86/x86_64/sse41.rs @@ -0,0 +1,49 @@ +use v128::*; + +#[cfg(test)] +use stdsimd_test::assert_instr; + +/// Extract an 64-bit integer from `a` selected with `imm8` +#[inline(always)] +#[target_feature = "+sse4.1"] +// TODO: Add test for Windows +#[cfg_attr(all(test, not(windows)), assert_instr(pextrq, imm8 = 1))] +pub unsafe fn _mm_extract_epi64(a: i64x2, imm8: i32) -> i64 { + let imm8 = (imm8 & 1) as u32; + a.extract_unchecked(imm8) +} + +/// Return a copy of `a` with the 64-bit integer from `i` 
inserted at a +/// location specified by `imm8`. +#[inline(always)] +#[target_feature = "+sse4.1"] +#[cfg_attr(test, assert_instr(pinsrq, imm8 = 0))] +pub unsafe fn _mm_insert_epi64(a: i64x2, i: i64, imm8: i32) -> i64x2 { + a.replace((imm8 & 0b1) as u32, i) +} + +#[cfg(test)] +mod tests { + use stdsimd_test::simd_test; + use x86::x86_64::sse41; + use v128::*; + + #[simd_test = "sse4.1"] + unsafe fn _mm_extract_epi64() { + let a = i64x2::new(0, 1); + let r = sse41::_mm_extract_epi64(a, 1); + assert_eq!(r, 1); + let r = sse41::_mm_extract_epi64(a, 3); + assert_eq!(r, 1); + } + + #[simd_test = "sse4.1"] + unsafe fn _mm_insert_epi64() { + let a = i64x2::splat(0); + let e = i64x2::splat(0).replace(1, 32); + let r = sse41::_mm_insert_epi64(a, 32, 1); + assert_eq!(r, e); + let r = sse41::_mm_insert_epi64(a, 32, 3); + assert_eq!(r, e); + } +} diff --git a/coresimd/src/x86/x86_64/xsave.rs b/coresimd/src/x86/x86_64/xsave.rs index 03e6df59a1..6f8eaa6446 100644 --- a/coresimd/src/x86/x86_64/xsave.rs +++ b/coresimd/src/x86/x86_64/xsave.rs @@ -32,7 +32,7 @@ extern "C" { #[inline(always)] #[target_feature = "+xsave"] #[cfg_attr(test, assert_instr(xsave64))] -pub unsafe fn _xsave64(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsave64(mem_addr: *mut u8, save_mask: u64) { xsave64(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -45,7 +45,7 @@ pub unsafe fn _xsave64(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave"] #[cfg_attr(test, assert_instr(xrstor64))] -pub unsafe fn _xrstor64(mem_addr: *const u8, rs_mask: u64) -> () { +pub unsafe fn _xrstor64(mem_addr: *const u8, rs_mask: u64) { xrstor64(mem_addr, (rs_mask >> 32) as u32, rs_mask as u32); } @@ -59,7 +59,7 @@ pub unsafe fn _xrstor64(mem_addr: *const u8, rs_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsaveopt"] #[cfg_attr(test, assert_instr(xsaveopt64))] -pub unsafe fn _xsaveopt64(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn 
_xsaveopt64(mem_addr: *mut u8, save_mask: u64) { xsaveopt64(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -72,7 +72,7 @@ pub unsafe fn _xsaveopt64(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsavec"] #[cfg_attr(test, assert_instr(xsavec64))] -pub unsafe fn _xsavec64(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsavec64(mem_addr: *mut u8, save_mask: u64) { xsavec64(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -86,7 +86,7 @@ pub unsafe fn _xsavec64(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsaves"] #[cfg_attr(test, assert_instr(xsaves64))] -pub unsafe fn _xsaves64(mem_addr: *mut u8, save_mask: u64) -> () { +pub unsafe fn _xsaves64(mem_addr: *mut u8, save_mask: u64) { xsaves64(mem_addr, (save_mask >> 32) as u32, save_mask as u32); } @@ -102,7 +102,7 @@ pub unsafe fn _xsaves64(mem_addr: *mut u8, save_mask: u64) -> () { #[inline(always)] #[target_feature = "+xsave,+xsaves"] #[cfg_attr(test, assert_instr(xrstors64))] -pub unsafe fn _xrstors64(mem_addr: *const u8, rs_mask: u64) -> () { +pub unsafe fn _xrstors64(mem_addr: *const u8, rs_mask: u64) { xrstors64(mem_addr, (rs_mask >> 32) as u32, rs_mask as u32); } diff --git a/stdsimd-test/assert-instr-macro/src/lib.rs b/stdsimd-test/assert-instr-macro/src/lib.rs index 5dd70f6793..a6e32a26e9 100644 --- a/stdsimd-test/assert-instr-macro/src/lib.rs +++ b/stdsimd-test/assert-instr-macro/src/lib.rs @@ -40,10 +40,10 @@ pub fn assert_instr( }; let name = &func.ident; let assert_name = syn::Ident::from( - &format!("assert_{}_{}", name.sym.as_str(), instr.sym.as_str())[..], + &format!("assert_{}_{}", name.as_ref(), instr.as_ref())[..], ); let shim_name = - syn::Ident::from(&format!("{}_shim", name.sym.as_str())[..]); + syn::Ident::from(format!("{}_shim", name.as_ref())); let (to_test, test_name) = if invoc.args.len() == 0 { (TokenStream::empty(), &func.ident) } else { @@ -59,7 +59,7 @@ pub fn 
assert_instr( syn::Pat::Ident(ref i) => &i.ident, _ => panic!("must have bare arguments"), }; - match invoc.args.iter().find(|a| a.0 == ident.sym.as_str()) { + match invoc.args.iter().find(|a| a.0 == ident.as_ref()) { Some(&(_, ref tts)) => { input_vals.push(quote! { #tts }); } @@ -78,8 +78,7 @@ pub fn assert_instr( .get(0) .item() .ident - .sym - .as_str() + .as_ref() .starts_with("target") }) .collect::>(); diff --git a/stdsimd-verify/.gitattributes b/stdsimd-verify/.gitattributes new file mode 100644 index 0000000000..621fdea6f7 --- /dev/null +++ b/stdsimd-verify/.gitattributes @@ -0,0 +1 @@ +*.xml binary diff --git a/stdsimd-verify/Cargo.toml b/stdsimd-verify/Cargo.toml new file mode 100644 index 0000000000..04ea7d1dce --- /dev/null +++ b/stdsimd-verify/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "stdsimd-verify" +version = "0.1.0" +authors = ["Alex Crichton "] + +[dependencies] +proc-macro2 = { version = "0.1", features = ["unstable"] } +quote = { git = 'https://github.com/dtolnay/quote' } +syn = { git = 'https://github.com/dtolnay/syn', features =["full"] } + +[lib] +proc-macro = true +test = false + +[dev-dependencies] +serde = "1.0" +serde_derive = "1.0" +serde-xml-rs = "0.2" diff --git a/stdsimd-verify/build.rs b/stdsimd-verify/build.rs new file mode 100644 index 0000000000..3273777679 --- /dev/null +++ b/stdsimd-verify/build.rs @@ -0,0 +1,24 @@ +use std::path::Path; + +fn main() { + let dir = Path::new(env!("CARGO_MANIFEST_DIR")); + let root = dir.parent().unwrap(); + let root = root.join("coresimd/src/x86"); + walk(&root); +} + +fn walk(root: &Path) { + for file in root.read_dir().unwrap() { + let file = file.unwrap(); + if file.file_type().unwrap().is_dir() { + walk(&file.path()); + continue + } + let path = file.path(); + if path.extension().and_then(|s| s.to_str()) != Some("rs") { + continue + } + + println!("cargo:rerun-if-changed={}", path.display()); + } +} diff --git a/stdsimd-verify/src/lib.rs b/stdsimd-verify/src/lib.rs new file mode 100644 
index 0000000000..faea0e598a --- /dev/null +++ b/stdsimd-verify/src/lib.rs @@ -0,0 +1,244 @@ +#![feature(proc_macro)] + +extern crate proc_macro; +extern crate proc_macro2; +extern crate syn; +#[macro_use] +extern crate quote; + +use std::path::Path; +use std::fs::File; +use std::io::Read; + +use proc_macro::TokenStream; +use quote::Tokens; + +macro_rules! my_quote { + ($($t:tt)*) => (quote_spanned!(proc_macro2::Span::call_site(), $($t)*)) +} + +#[proc_macro] +pub fn x86_functions(input: TokenStream) -> TokenStream { + let dir = Path::new(env!("CARGO_MANIFEST_DIR")); + let root = dir.parent().unwrap(); + let root = root.join("coresimd/src/x86"); + + let mut files = Vec::new(); + walk(&root, &mut files); + + let mut functions = Vec::new(); + for file in files { + for item in file.items { + match item { + syn::Item::Fn(f) => functions.push(f), + _ => {} + } + } + } + + functions.retain(|f| { + match f.vis { + syn::Visibility::Public(_) => {} + _ => return false, + } + if f.unsafety.is_none() { + return false + } + f.attrs.iter() + .filter_map(|a| a.meta_item()) + .any(|a| { + match a { + syn::MetaItem::NameValue(i) => i.ident == "target_feature", + _ => false, + } + }) + }); + + let input = proc_macro2::TokenStream::from(input); + + let functions = functions.iter() + .map(|f| { + let name = f.ident; + // println!("{}", name); + let mut arguments = Vec::new(); + for input in f.decl.inputs.iter().map(|s| s.into_item()) { + let ty = match *input { + syn::FnArg::Captured(ref c) => &c.ty, + _ => panic!("invalid argument on {}", name), + }; + arguments.push(to_type(ty)); + } + let ret = match f.decl.output { + syn::ReturnType::Default => my_quote! { None }, + syn::ReturnType::Type(_, ref t) => { + let ty = to_type(t); + my_quote! { Some(#ty) } + } + }; + let instrs = find_instrs(&f.attrs); + let target_feature = find_target_feature(f.ident, &f.attrs); + my_quote! 
{ + Function { + name: stringify!(#name), + arguments: &[#(#arguments),*], + ret: #ret, + target_feature: #target_feature, + instrs: &[#(stringify!(#instrs)),*], + } + } + }) + .collect::>(); + + let ret = my_quote! { #input: &[Function] = &[#(#functions),*]; }; + // println!("{}", ret); + ret.into() +} + +fn to_type(t: &syn::Type) -> Tokens { + match *t { + syn::Type::Path(ref p) => { + match extract_path_ident(&p.path).as_ref() { + "__m128i" => my_quote! { &I8x16 }, + "__m256i" => my_quote! { &I8x32 }, + "__m64" => my_quote! { &I8x8 }, + "bool" => my_quote! { &BOOL }, + "f32" => my_quote! { &F32 }, + "f32x4" => my_quote! { &F32x4 }, + "f32x8" => my_quote! { &F32x8 }, + "f64" => my_quote! { &F64 }, + "f64x2" => my_quote! { &F64x2 }, + "f64x4" => my_quote! { &F64x4 }, + "i16" => my_quote! { &I16 }, + "i16x16" => my_quote! { &I16x16 }, + "i16x4" => my_quote! { &I16x4 }, + "i16x8" => my_quote! { &I16x8 }, + "i32" => my_quote! { &I32 }, + "i32x2" => my_quote! { &I32x2 }, + "i32x4" => my_quote! { &I32x4 }, + "i32x8" => my_quote! { &I32x8 }, + "i64" => my_quote! { &I64 }, + "i64x2" => my_quote! { &I64x2 }, + "i64x4" => my_quote! { &I64x4 }, + "i8" => my_quote! { &I8 }, + "i8x16" => my_quote! { &I8x16 }, + "i8x32" => my_quote! { &I8x32 }, + "i8x8" => my_quote! { &I8x8 }, + "u16x4" => my_quote! { &U16x4 }, + "u16x8" => my_quote! { &U16x8 }, + "u32" => my_quote! { &U32 }, + "u32x2" => my_quote! { &U32x2 }, + "u32x4" => my_quote! { &U32x4 }, + "u32x8" => my_quote! { &U32x8 }, + "u64" => my_quote! { &U64 }, + "u64x2" => my_quote! { &U64x2 }, + "u64x4" => my_quote! { &U64x4 }, + "u8" => my_quote! { &U8 }, + "u16" => my_quote! { &U16 }, + "u8x16" => my_quote! { &U8x16 }, + "u8x32" => my_quote! { &U8x32 }, + "u16x16" => my_quote! { &U16x16 }, + "u8x8" => my_quote! { &U8x8 }, + s => panic!("unspported type: {}", s), + } + } + syn::Type::Ptr(syn::TypePtr { ref elem, .. }) | + syn::Type::Reference(syn::TypeReference { ref elem, .. }) => { + let tokens = to_type(&elem); + my_quote! 
{ &Type::Ptr(#tokens) } + } + syn::Type::Slice(_) => panic!("unsupported slice"), + syn::Type::Array(_) => panic!("unsupported array"), + syn::Type::Tuple(_) => panic!("unsupported tup"), + _ => panic!("unsupported type"), + } +} + +fn extract_path_ident(path: &syn::Path) -> syn::Ident { + if path.leading_colon.is_some() { + panic!("unsupported leading colon in path") + } + if path.segments.len() != 1 { + panic!("unsupported path that needs name resolution") + } + match path.segments.get(0).item().arguments { + syn::PathArguments::None => {} + _ => panic!("unsupported path that has path arguments") + } + path.segments.get(0).item().ident +} + +fn walk(root: &Path, files: &mut Vec) { + for file in root.read_dir().unwrap() { + let file = file.unwrap(); + if file.file_type().unwrap().is_dir() { + walk(&file.path(), files); + continue + } + let path = file.path(); + if path.extension().and_then(|s| s.to_str()) != Some("rs") { + continue + } + + let mut contents = String::new(); + File::open(&path).unwrap().read_to_string(&mut contents).unwrap(); + + files.push(syn::parse_str::(&contents).expect("failed to parse")); + } +} + +fn find_instrs(attrs: &[syn::Attribute]) -> Vec { + attrs.iter() + .filter_map(|a| a.meta_item()) + .filter_map(|a| { + match a { + syn::MetaItem::List(i) => { + if i.ident == "cfg_attr" { + Some(i.nested.into_vec()) + } else { + None + } + } + _ => None, + } + }) + .filter_map(|list| list.into_iter().nth(1)) + .filter_map(|nested| { + match nested { + syn::NestedMetaItem::MetaItem(syn::MetaItem::List(i)) => { + if i.ident == "assert_instr" { + Some(i.nested.into_vec()) + } else { + None + } + } + _ => None, + } + }) + .filter_map(|list| list.into_iter().next()) + .filter_map(|nested| { + match nested { + syn::NestedMetaItem::MetaItem(syn::MetaItem::Term(i)) => Some(i), + _ => None, + } + }) + .collect() +} + +fn find_target_feature(name: syn::Ident, attrs: &[syn::Attribute]) -> syn::Lit { + attrs.iter() + .filter_map(|a| a.meta_item()) + 
.filter_map(|a| { + match a { + syn::MetaItem::NameValue(i) => { + if i.ident == "target_feature" { + Some(i.lit) + } else { + None + } + } + _ => None, + } + }) + .next() + .expect(&format!("failed to find target_feature for {}",name)) +} diff --git a/stdsimd-verify/tests/x86-intel.rs b/stdsimd-verify/tests/x86-intel.rs new file mode 100644 index 0000000000..b4a8a4b2b9 --- /dev/null +++ b/stdsimd-verify/tests/x86-intel.rs @@ -0,0 +1,310 @@ +#![feature(proc_macro)] +#![allow(bad_style)] + +#[macro_use] +extern crate serde_derive; +extern crate serde_xml_rs; +extern crate stdsimd_verify; + +use std::collections::HashMap; + +use stdsimd_verify::x86_functions; + +struct Function { + name: &'static str, + arguments: &'static [&'static Type], + ret: Option<&'static Type>, + target_feature: &'static str, + instrs: &'static [&'static str], +} + +static BOOL: Type = Type::Bool; +static F32: Type = Type::PrimFloat(32); +static F32x4: Type = Type::Float(32, 4); +static F32x8: Type = Type::Float(32, 8); +static F64: Type = Type::PrimFloat(64); +static F64x2: Type = Type::Float(64, 2); +static F64x4: Type = Type::Float(64, 4); +static I16: Type = Type::PrimSigned(16); +static I16x16: Type = Type::Signed(16, 16); +static I16x4: Type = Type::Signed(16, 4); +static I16x8: Type = Type::Signed(16, 8); +static I32: Type = Type::PrimSigned(32); +static I32x2: Type = Type::Signed(32, 2); +static I32x4: Type = Type::Signed(32, 4); +static I32x8: Type = Type::Signed(32, 8); +static I64: Type = Type::PrimSigned(64); +static I64x2: Type = Type::Signed(64, 2); +static I64x4: Type = Type::Signed(64, 4); +static I8: Type = Type::PrimSigned(8); +static I8x16: Type = Type::Signed(8, 16); +static I8x32: Type = Type::Signed(8, 32); +static I8x8: Type = Type::Signed(8, 8); +static U16: Type = Type::PrimUnsigned(16); +static U16x16: Type = Type::Unsigned(16, 16); +static U16x4: Type = Type::Unsigned(16, 4); +static U16x8: Type = Type::Unsigned(16, 8); +static U32: Type = Type::PrimUnsigned(32); 
+static U32x2: Type = Type::Unsigned(32, 2); +static U32x4: Type = Type::Unsigned(32, 4); +static U32x8: Type = Type::Unsigned(32, 8); +static U64: Type = Type::PrimUnsigned(64); +static U64x2: Type = Type::Unsigned(64, 2); +static U64x4: Type = Type::Unsigned(64, 4); +static U8: Type = Type::PrimUnsigned(8); +static U8x16: Type = Type::Unsigned(8, 16); +static U8x32: Type = Type::Unsigned(8, 32); +static U8x8: Type = Type::Unsigned(8, 8); + +#[derive(Debug)] +enum Type { + Float(u8, u8), + PrimFloat(u8), + PrimSigned(u8), + PrimUnsigned(u8), + Ptr(&'static Type), + Signed(u8, u8), + Unsigned(u8, u8), + Bool, +} + +x86_functions!(static FUNCTIONS); + +#[derive(Deserialize)] +struct Data { + #[serde(rename = "intrinsic", default)] + intrinsics: Vec, +} + +#[derive(Deserialize)] +struct Intrinsic { + rettype: String, + name: String, + tech: String, + #[serde(rename = "CPUID", default)] + cpuid: Vec, + #[serde(rename = "parameter", default)] + parameters: Vec, + #[serde(default)] + instruction: Vec, +} + +#[derive(Deserialize)] +struct Parameter { + #[serde(rename = "type")] + type_: String, +} + +#[derive(Deserialize)] +struct Instruction { + name: String, +} + +#[test] +fn verify_all_signatures() { + // This XML document was downloaded from Intel's site. To update this you + // can visit intel's intrinsics guide online documentation: + // + // https://software.intel.com/sites/landingpage/IntrinsicsGuide/# + // + // Open up the network console and you'll see an xml file was downloaded + // (currently called data-3.4.xml). That's the file we downloaded + // here. + let xml = include_bytes!("../x86-intel.xml"); + + let xml = &xml[..]; + let data: Data = serde_xml_rs::deserialize(xml).expect("failed to deserialize xml"); + let mut map = HashMap::new(); + for intrinsic in data.intrinsics.iter() { + // This intrinsic has multiple definitions in the XML, so just ignore it. 
+ if intrinsic.name == "_mm_prefetch" { + continue + } + + // These'll need to get added eventually, but right now they have some + // duplicate names in the XML which we're not dealing with yet + if intrinsic.tech == "AVX-512" { + continue + } + + assert!(map.insert(&intrinsic.name[..], intrinsic).is_none()); + } + + for rust in FUNCTIONS { + // This was ignored above, we ignore it here as well. + if rust.name == "_mm_prefetch" { + continue + } + + // these are all AMD-specific intrinsics + if rust.target_feature.contains("sse4a") || + rust.target_feature.contains("tbm") { + continue + } + + let intel = match map.get(rust.name) { + Some(i) => i, + None => panic!("missing intel definition for {}", rust.name), + }; + + // Verify that all `#[target_feature]` annotations are correct, ensuring + // that we've actually enabled the right instruction set for this + // intrinsic. + assert!(intel.cpuid.len() > 0, "missing cpuid for {}", rust.name); + for cpuid in intel.cpuid.iter() { + // this is needed by _xsave and probably some related intrinsics, + // but let's just skip it for now. + if *cpuid == "XSS" { + continue + } + + let cpuid = cpuid + .chars() + .flat_map(|c| c.to_lowercase()) + .collect::(); + + // Normalize `bmi1` to `bmi` as apparently that's what we're calling + // it. + let cpuid = if cpuid == "bmi1" { + String::from("bmi") + } else { + cpuid + }; + + assert!(rust.target_feature.contains(&cpuid), + "intel cpuid `{}` not in `{}` for {}", + cpuid, + rust.target_feature, + rust.name); + } + + // TODO: we should test this, but it generates too many failures right + // now + if false { + if rust.instrs.len() == 0 { + assert_eq!(intel.instruction.len(), 0, + "instruction not listed for {}", rust.name); + + // If intel doesn't list any instructions and we do then don't + // bother trying to look for instructions in intel, we've just got + // some extra assertions on our end. 
+ } else if intel.instruction.len() > 0 { + for instr in rust.instrs.iter() { + assert!(intel.instruction.iter().any(|a| a.name.starts_with(instr)), + "intel failed to list `{}` as an instruction for `{}`", + instr, rust.name); + } + } + } + + // Make sure we've got the right return type. + match rust.ret { + Some(t) => equate(t, &intel.rettype, &rust.name), + None => { + assert!(intel.rettype == "" || intel.rettype == "void", + "{} returns `{}` with intel, void in rust", + rust.name, intel.rettype); + } + } + + // If there's no arguments on Rust's side intel may list one "void" + // argument, so handle that here. + if rust.arguments.len() == 0 { + if intel.parameters.len() == 1 { + assert_eq!(intel.parameters[0].type_, "void"); + continue + } + } + + // Otherwise we want all parameters to be exactly the same + assert_eq!(rust.arguments.len(), intel.parameters.len(), + "wrong number of arguments on {}", rust.name); + for (a, b) in intel.parameters.iter().zip(rust.arguments) { + equate(b, &a.type_, &intel.name); + } + } +} + +fn equate(t: &Type, intel: &str, intrinsic: &str) { + let intel = intel.replace(" *", "*"); + let intel = intel.replace(" const*", "*"); + match (t, &intel[..]) { + (&Type::PrimFloat(32), "float") => {} + (&Type::PrimFloat(64), "double") => {} + (&Type::PrimSigned(16), "__int16") => {} + (&Type::PrimSigned(16), "short") => {} + (&Type::PrimSigned(32), "__int32") => {} + (&Type::PrimSigned(32), "const int") => {} + (&Type::PrimSigned(32), "int") => {} + (&Type::PrimSigned(64), "__int64") => {} + (&Type::PrimSigned(64), "long long") => {} + (&Type::PrimSigned(8), "__int8") => {} + (&Type::PrimSigned(8), "char") => {} + (&Type::PrimUnsigned(16), "unsigned short") => {} + (&Type::PrimUnsigned(32), "unsigned int") => {} + (&Type::PrimUnsigned(64), "unsigned __int64") => {} + (&Type::PrimUnsigned(8), "unsigned char") => {} + + (&Type::Ptr(&Type::PrimFloat(32)), "float*") => {} + (&Type::Ptr(&Type::PrimFloat(64)), "double*") => {} + 
(&Type::Ptr(&Type::PrimSigned(32)), "int*") => {} + (&Type::Ptr(&Type::PrimSigned(64)), "__int64*") => {} + (&Type::Ptr(&Type::PrimSigned(8)), "char*") => {} + (&Type::Ptr(&Type::PrimUnsigned(32)), "unsigned int*") => {} + (&Type::Ptr(&Type::PrimUnsigned(64)), "unsigned __int64*") => {} + (&Type::Ptr(&Type::PrimUnsigned(8)), "const void*") => {} + (&Type::Ptr(&Type::PrimUnsigned(8)), "void*") => {} + + (&Type::Signed(a, b), "__m128i") | + (&Type::Unsigned(a, b), "__m128i") | + (&Type::Ptr(&Type::Signed(a, b)), "__m128i*") | + (&Type::Ptr(&Type::Unsigned(a, b)), "__m128i*") if a * b == 128 => {} + + (&Type::Signed(a, b), "__m256i") | + (&Type::Unsigned(a, b), "__m256i") | + (&Type::Ptr(&Type::Signed(a, b)), "__m256i*") | + (&Type::Ptr(&Type::Unsigned(a, b)), "__m256i*") if (a as u32) * (b as u32) == 256 => {} + + (&Type::Signed(a, b), "__m64") | + (&Type::Unsigned(a, b), "__m64") | + (&Type::Ptr(&Type::Signed(a, b)), "__m64*") | + (&Type::Ptr(&Type::Unsigned(a, b)), "__m64*") if a * b == 64 => {} + + (&Type::Float(32, 4), "__m128") => {} + (&Type::Ptr(&Type::Float(32, 4)), "__m128*") => {} + + (&Type::Float(64, 2), "__m128d") => {} + (&Type::Ptr(&Type::Float(64, 2)), "__m128d*") => {} + + (&Type::Float(32, 8), "__m256") => {} + (&Type::Ptr(&Type::Float(32, 8)), "__m256*") => {} + + (&Type::Float(64, 4), "__m256d") => {} + (&Type::Ptr(&Type::Float(64, 4)), "__m256d*") => {} + + // These two intrinsics return a 16-bit element but in Intel's + // intrinsics they're listed as returning an `int`. + (&Type::PrimSigned(16), "int") if intrinsic == "_mm_extract_pi16" => {} + (&Type::PrimSigned(16), "int") if intrinsic == "_m_pextrw" => {} + + // This intrinsic takes an `i8` to get inserted into an i8 vector, but + // Intel says the argument is i32... + (&Type::PrimSigned(8), "int") if intrinsic == "_mm_insert_epi8" => {} + + // This is a macro (?) 
in C which seems to mutate its arguments, but that + // means that we're taking pointers to arguments in rust as we're not + // exposing it as a macro. + (&Type::Ptr(&Type::Float(32, 4)), "__m128") if intrinsic == "_MM_TRANSPOSE4_PS" => {} + + // These intrinsics return an `int` in C but they're always either the + // bit 1 or 0 so we switch it to returning `bool` in rust + (&Type::Bool, "int") + if intrinsic.starts_with("_mm_comi") && intrinsic.ends_with("_sd") + => {} + (&Type::Bool, "int") + if intrinsic.starts_with("_mm_ucomi") && intrinsic.ends_with("_sd") + => {} + + _ => panic!("failed to equate: `{}` and {:?} for {}", intel, t, intrinsic), + } +} diff --git a/stdsimd-verify/x86-intel.xml b/stdsimd-verify/x86-intel.xml new file mode 100644 index 0000000000..c22a3adaec --- /dev/null +++ b/stdsimd-verify/x86-intel.xml @@ -0,0 +1,134861 @@ + + + Integer + MMX + Convert + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + +
mmintrin.h
+
+ + MMX + General Support + + Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures. + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper element of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := 0 + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + + +dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_Int8 (b[15:0]) +dst[47:40] := Saturate_Int16_To_Int8 (b[31:16]) +dst[55:48] := Saturate_Int16_To_Int8 (b[47:32]) +dst[63:56] := Saturate_Int16_To_Int8 (b[63:48]) + + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_Int16 (b[31:0]) +dst[63:48] := Saturate_Int32_To_Int16 (b[63:32]) + + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +dst[47:40] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +dst[55:48] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +dst[63:56] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]){ + dst[7:0] := src1[39:32] + dst[15:8] := src2[39:32] + dst[23:16] := src1[47:40] + dst[31:24] := src2[47:40] + dst[39:32] := src1[55:48] + dst[47:40] := src2[55:48] + dst[55:48] := src1[63:56] + dst[63:56] := src2[63:56] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]){ + dst[15:0] := src1[47:32] + dst[31:16] := src2[47:32] + dst[47:32] := src1[63:48] + dst[63:48] := src2[63:48] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := b[63:32] + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_BYTES(src1[63:0], src2[63:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_WORDS(src1[63:0], src2[63:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := b[31:0] + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Arithmetic + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] << count[63:0]) +FI + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] << imm8[7:0]) +FI + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] >> count[63:0]) +FI + + +
mmintrin.h
+
+ + Floating Point + Integer + MMX + Shift + + + Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] >> imm8[7:0]) +FI + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] AND b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] OR b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] XOR b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + MMX + General Support + + Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures. + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] << count[63:0]) +FI + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] << imm8[7:0]) +FI + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] >> count[63:0]) +FI + + +
mmintrin.h
+
+ + Integer + MMX + Shift + + + Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend(a[63:0] >> imm8[7:0]) +FI + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] AND b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] OR b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Logical + + + Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] XOR b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper element of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := 0 + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + +
mmintrin.h
+
+ + Integer + MMX + Convert + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + +
mmintrin.h
+
+ + Integer + MMX + Set + + Return vector of type __m64 with all elements set to zero. + +dst[MAX:0] := 0 + + +
mmintrin.h
+
+ + Integer + MMX + Set + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 + +
mmintrin.h
+
+ + Integer + MMX + Set + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 + +
mmintrin.h
+
+ + Integer + MMX + Set + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 + +
mmintrin.h
+
+ + Integer + MMX + Set + + Broadcast 32-bit integer "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + +
mmintrin.h
+
+ + Integer + MMX + Set + + Broadcast 16-bit integer "a" to all all elements of "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR + +
mmintrin.h
+
+ + Integer + MMX + Set + + Broadcast 8-bit integer "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR + +
mmintrin.h
+
+ + Integer + MMX + Set + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e1 +dst[63:32] := e0 + +
mmintrin.h
+
+ + Integer + MMX + Set + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e3 +dst[31:16] := e2 +dst[47:32] := e1 +dst[63:48] := e0 + +
mmintrin.h
+
+ + Integer + MMX + Set + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e7 +dst[15:8] := e6 +dst[23:16] := e5 +dst[31:24] := e4 +dst[39:32] := e3 +dst[47:40] := e2 +dst[55:48] := e1 +dst[63:56] := e0 + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + + +dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_Int8 (b[15:0]) +dst[47:40] := Saturate_Int16_To_Int8 (b[31:16]) +dst[55:48] := Saturate_Int16_To_Int8 (b[47:32]) +dst[63:56] := Saturate_Int16_To_Int8 (b[63:48]) + + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_Int16 (b[31:0]) +dst[63:48] := Saturate_Int32_To_Int16 (b[63:32]) + + +
mmintrin.h
+
+ + Integer + MMX + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +dst[47:40] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +dst[55:48] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +dst[63:56] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]){ + dst[7:0] := src1[39:32] + dst[15:8] := src2[39:32] + dst[23:16] := src1[47:40] + dst[31:24] := src2[47:40] + dst[39:32] := src1[55:48] + dst[47:40] := src2[55:48] + dst[55:48] := src1[63:56] + dst[63:56] := src2[63:56] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]){ + dst[15:0] := src1[47:32] + dst[31:16] := src2[47:32] + dst[47:32] := src1[63:48] + dst[63:48] := src2[63:48] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := b[63:32] + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_BYTES(src1[63:0], src2[63:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_WORDS(src1[63:0], src2[63:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + RETURN dst[63:0] +} + +dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) + + +
mmintrin.h
+
+ + Integer + MMX + Swizzle + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := b[31:0] + + +
mmintrin.h
+
+ + + SSE + Swizzle + + + + + Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision (32-bit) floating-point elements in "row0", "row1", "row2", and "row3", and store the transposed matrix in these vectors ("row0" now contains column 0, etc.). + +__m128 tmp3, tmp2, tmp1, tmp0; +tmp0 = _mm_unpacklo_ps(row0, row1); +tmp2 = _mm_unpacklo_ps(row2, row3); +tmp1 = _mm_unpackhi_ps(row0, row1); +tmp3 = _mm_unpackhi_ps(row2, row3); +row0 = _mm_movelh_ps(tmp0, tmp2); +row1 = _mm_movehl_ps(tmp2, tmp0); +row2 = _mm_movelh_ps(tmp1, tmp3); +row3 = _mm_movehl_ps(tmp3, tmp1); + +
xmmintrin.h
+
+ + SSE + General Support + + Get the unsigned 32-bit value of the MXCSR control and status register. + +dst[31:0] := MXCSR + + +
xmmintrin.h
+
+ + SSE + General Support + + Set the MXCSR control and status register with the value in unsigned 32-bit integer "a". + +MXCSR := a[31:0] + + +
xmmintrin.h
+
+ + SSE + General Support + Macro: Get the exception state bits from the MXCSR control and status register. The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT + +dst[31:0] := MXCSR & _MM_EXCEPT_MASK + +
xmmintrin.h
+
+ + SSE + General Support + + Macro: Set the exception state bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT + +MXCSR := a[31:0] AND ~_MM_EXCEPT_MASK + +
xmmintrin.h
+
+ + SSE + General Support + Macro: Get the exception mask bits from the MXCSR control and status register. The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT + +dst[31:0] := MXCSR & _MM_MASK_MASK + +
xmmintrin.h
+
+ + SSE + General Support + + Macro: Set the exception mask bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT + +MXCSR := a[31:0] AND ~_MM_MASK_MASK + +
xmmintrin.h
+
+ + SSE + General Support + Macro: Get the rounding mode bits from the MXCSR control and status register. The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO + +dst[31:0] := MXCSR & _MM_ROUND_MASK + +
xmmintrin.h
+
+ + SSE + General Support + + Macro: Set the rounding mode bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO + +MXCSR := a[31:0] AND ~_MM_ROUND_MASK + +
xmmintrin.h
+
+ + SSE + General Support + Macro: Get the flush zero bits from the MXCSR control and status register. The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF + +dst[31:0] := MXCSR & _MM_FLUSH_MASK + +
xmmintrin.h
+
+ + SSE + General Support + + Macro: Set the flush zero bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF + +MXCSR := a[31:0] AND ~_MM_FLUSH_MASK + +
xmmintrin.h
+
+ + SSE + General Support + + + Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i". + + + + +
xmmintrin.h
+
+ + KNCNI + General Support + + + Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i". + + + + + + + + +
xmmintrin.h
+
+ + PREFETCHWT1 + General Support + + + Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i". + +
xmmintrin.h
+
+ + SSE + General Support + + Perform a serializing operation on all store-to-memory instructions that were issued prior to this instruction. Guarantees that every store instruction that precedes, in program order, is globally visible before any store instruction which follows the fence in program order. + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Arithmetic + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Arithmetic + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Probability/Statistics + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Probability/Statistics + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Probability/Statistics + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Probability/Statistics + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Arithmetic + Miscellaneous + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce four unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of "dst". + +FOR j := 0 to 7 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR + +dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56] +dst[63:16] := 0 + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Arithmetic + Miscellaneous + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce four unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of "dst". + +FOR j := 0 to 7 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR + +dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56] +dst[63:16] := 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + + Convert the 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Integer + SSE + Convert + + + Convert the 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + + Convert the 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + + Convert packed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[63:32] := Convert_Int32_To_FP32(b[63:32]) +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + +
xmmintrin.h
+
+ + Integer + SSE + Convert + + + Convert packed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[63:32] := Convert_Int32_To_FP32(b[63:32]) +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert packed 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + m := j*32 + dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i]) +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert packed unsigned 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + m := j*32 + dst[m+31:m] := Convert_UnsignedInt16_To_FP32(a[i+15:i]) +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert the lower packed 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*8 + m := j*32 + dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i]) +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert the lower packed unsigned 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*8 + m := j*32 + dst[m+31:m] := Convert_UnsignedInt8_To_FP32(a[i+7:i]) +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", then convert the packed 32-bit integers in "b" to single-precision (32-bit) floating-point elements, and store the results in the upper 2 elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(a[31:0]) +dst[63:32] := Convert_Int32_To_FP32(a[63:32]) +dst[95:64] := Convert_Int32_To_FP32(b[31:0]) +dst[127:96] := Convert_Int32_To_FP32(b[63:32]) +
xmmintrin.h
+
+ + Integer + SSE + Store + + + Store 64-bits of integer data from "a" into memory using a non-temporal memory hint. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
xmmintrin.h
+
+ + Integer + SSE + Store + + + + Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint. + +FOR j := 0 to 7 + i := j*8 + IF mask[i+7] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Store + + + + Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + +FOR j := 0 to 7 + i := j*8 + IF mask[i+7] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
xmmintrin.h
+
+ + Integer + SSE + Swizzle + + + Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[15:0] := (a[63:0] >> (imm8[1:0] * 16))[15:0] +dst[31:16] := 0 + + +
xmmintrin.h
+
+ + Integer + SSE + Swizzle + + + Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[15:0] := (a[63:0] >> (imm8[1:0] * 16))[15:0] +dst[31:16] := 0 + + +
xmmintrin.h
+
+ + Integer + SSE + Swizzle + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8". + +dst[63:0] := a[63:0] +sel := imm8[1:0]*16 +dst[sel+15:sel] := i[15:0] + + +
xmmintrin.h
+
+ + Integer + SSE + Swizzle + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8". + +dst[63:0] := a[63:0] +sel := imm8[1:0]*16 +dst[sel+15:sel] := i[15:0] + + +
xmmintrin.h
+
+ + Integer + SSE + Miscellaneous + + Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[j] := a[i+7] +ENDFOR +dst[MAX:8] := 0 + + +
xmmintrin.h
+
+ + Integer + SSE + Miscellaneous + + Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[j] := a[i+7] +ENDFOR +dst[MAX:8] := 0 + + +
xmmintrin.h
+
+ + Integer + SSE + Swizzle + + + Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[15:0] := src[15:0] + 1: tmp[15:0] := src[31:16] + 2: tmp[15:0] := src[47:32] + 3: tmp[15:0] := src[63:48] + ESAC + RETURN tmp[15:0] +} + +dst[15:0] := SELECT4(a[63:0], imm8[1:0]) +dst[31:16] := SELECT4(a[63:0], imm8[3:2]) +dst[47:32] := SELECT4(a[63:0], imm8[5:4]) +dst[63:48] := SELECT4(a[63:0], imm8[7:6]) + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Swizzle + + + Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[15:0] := src[15:0] + 1: tmp[15:0] := src[31:16] + 2: tmp[15:0] := src[47:32] + 3: tmp[15:0] := src[63:48] + ESAC + RETURN tmp[15:0] +} + +dst[15:0] := SELECT4(a[63:0], imm8[1:0]) +dst[31:16] := SELECT4(a[63:0], imm8[3:2]) +dst[47:32] := SELECT4(a[63:0], imm8[5:4]) +dst[63:48] := SELECT4(a[63:0], imm8[7:6]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := a[31:0] + b[31:0] +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := a[31:0] - b[31:0] +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := a[31:0] * b[31:0] +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := a[31:0] / b[31:0] +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Arithmetic + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := SQRT(a[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst[31:0] := APPROXIMATE(1.0/a[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst[31:0] := APPROXIMATE(1.0 / SQRT(a[31:0])) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[31:0] := MIN(a[31:0], b[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[31:0] := MAX(a[31:0], b[31:0]) +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Logical + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Logical + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Logical + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Logical + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] == b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] < b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] < b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] <= b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] <= b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] > b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] >= b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] >= b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] != b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] != b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := !( a[31:0] < b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := !( a[i+31:i] < b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := !( a[31:0] <= b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := !( a[i+31:i] <= b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := !( a[31:0] > b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := !( a[i+31:i] > b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := !( a[31:0] >= b[31:0] ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := !( a[i+31:i] >= b[i+31:i] ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] != NaN AND b[31:0] != NaN ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] != NaN AND b[i+31:i] != NaN ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] == NaN OR b[31:0] == NaN ) ? 0xffffffff : 0 +dst[127:32] := a[127:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == NaN OR b[i+31:i] == NaN ) ? 0xffffffff : 0 +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). + +RETURN ( a[31:0] == b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). + +RETURN ( a[31:0] < b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). + +RETURN ( a[31:0] <= b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). + +RETURN ( a[31:0] > b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). + +RETURN ( a[31:0] >= b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). + +RETURN ( a[31:0] != b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] == b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] < b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] <= b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] > b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] >= b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + SSE + Compare + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[31:0] != b[31:0] ) ? 1 : 0 + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Copy the lower single-precision (32-bit) floating-point element of "a" to "dst". + dst[31:0] := a[31:0] + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + +&#10;
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR + + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 16*j + k := 32*j + dst[i+15:i] := Convert_FP32_To_Int16(a[k+31:k]) +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + Integer + SSE + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 8-bit integers, and store the results in the lower 4 elements of "dst". + +FOR j := 0 to 3 + i := 8*j + k := 32*j + dst[i+7:i] := Convert_FP32_To_Int8(a[k+31:k]) +ENDFOR + +&#10;
xmmintrin.h
+
+ + Floating Point + SSE + Set + + Copy single-precision (32-bit) floating-point element "a" to the lower element of "dst", and zero the upper 3 elements. + +dst[31:0] := a[31:0] +dst[127:32] := 0 + +
xmmintrin.h
+
+ + Floating Point + SSE + Set + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + SSE + Set + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + +
xmmintrin.h
+
+ + Floating Point + SSE + Set + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 + +
xmmintrin.h
+
+ + Floating Point + SSE + Set + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e3 +dst[63:32] := e2 +dst[95:64] := e1 +dst[127:96] := e0 + +
xmmintrin.h
+
+ + Floating Point + SSE + Set + + Return vector of type __m128 with all elements set to zero. + +dst[MAX:0] := 0 + + +
xmmintrin.h
+
+ + Integer + SSE + Load + + + Load 2 single-precision (32-bit) floating-point elements from memory into the upper 2 elements of "dst", and copy the lower 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[31:0] := a[31:0] +dst[63:32] := a[63:32] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+63:mem_addr+32] + + +
xmmintrin.h
+
+ + Integer + SSE + Load + + + Load 2 single-precision (32-bit) floating-point elements from memory into the lower 2 elements of "dst", and copy the upper 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+63:mem_addr+32] +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst", and zero the upper 3 elements. "mem_addr" does not need to be aligned on any particular boundary. + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[127:32] := 0 + + +&#10;
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load a single-precision (32-bit) floating-point element from memory into all elements of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+31:mem_addr] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+31:mem_addr] + +
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load a single-precision (32-bit) floating-point element from memory into all elements of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+31:mem_addr] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+31:mem_addr] + +
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Load + + Load 4 single-precision (32-bit) floating-point elements from memory into "dst" in reverse order. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[31:0] := MEM[mem_addr+127:mem_addr+96] +dst[63:32] := MEM[mem_addr+95:mem_addr+64] +dst[95:64] := MEM[mem_addr+63:mem_addr+32] +dst[127:96] := MEM[mem_addr+31:mem_addr] + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
xmmintrin.h
+
+ + Integer + SSE + Store + + + Store the upper 2 single-precision (32-bit) floating-point elements from "a" into memory. + +MEM[mem_addr+31:mem_addr] := a[95:64] +MEM[mem_addr+63:mem_addr+32] := a[127:96] + + +
xmmintrin.h
+
+ + Integer + SSE + Store + + + Store the lower 2 single-precision (32-bit) floating-point elements from "a" into memory. + +MEM[mem_addr+31:mem_addr] := a[31:0] +MEM[mem_addr+63:mem_addr+32] := a[63:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store the lower single-precision (32-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+31:mem_addr] := a[31:0] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+31:mem_addr] := a[31:0] +MEM[mem_addr+63:mem_addr+32] := a[31:0] +MEM[mem_addr+95:mem_addr+64] := a[31:0] +MEM[mem_addr+127:mem_addr+96] := a[31:0] + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+31:mem_addr] := a[31:0] +MEM[mem_addr+63:mem_addr+32] := a[31:0] +MEM[mem_addr+95:mem_addr+64] := a[31:0] +MEM[mem_addr+127:mem_addr+96] := a[31:0] + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Store + + + Store 4 single-precision (32-bit) floating-point elements from "a" into memory in reverse order. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+31:mem_addr] := a[127:96] +MEM[mem_addr+63:mem_addr+32] := a[95:64] +MEM[mem_addr+95:mem_addr+64] := a[63:32] +MEM[mem_addr+127:mem_addr+96] := a[31:0] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Move + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst", and copy the upper 3 elements from "a" to the upper elements of "dst". + +dst[31:0] := b[31:0] +dst[63:32] := a[63:32] +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +dst[127:96] := SELECT4(b[127:0], imm8[7:6]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + + +
xmmintrin.h
+
+ + Floating Point + SSE + Move + + + Move the upper 2 single-precision (32-bit) floating-point elements from "b" to the lower 2 elements of "dst", and copy the upper 2 elements from "a" to the upper 2 elements of "dst". + +dst[31:0] := b[95:64] +dst[63:32] := b[127:96] +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Move + + + Move the lower 2 single-precision (32-bit) floating-point elements from "b" to the upper 2 elements of "dst", and copy the lower 2 elements from "a" to the lower 2 elements of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := a[63:32] +dst[95:64] := b[31:0] +dst[127:96] := b[63:32] + + +
xmmintrin.h
+
+ + Floating Point + SSE + Miscellaneous + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:4] := 0 + + +
xmmintrin.h
+
+ + SSE + General Support + + + Allocate "size" bytes of memory, aligned to the alignment specified in "align", and return a pointer to the allocated memory. "_mm_free" should be used to free memory that is allocated with "_mm_malloc". +
xmmintrin.h
+
+ + SSE + General Support + + Free aligned memory that was allocated with "_mm_malloc". +
xmmintrin.h
+
+ + + SSE2 + General Support + + Provide a hint to the processor that the code sequence is a spin-wait loop. This can help improve the performance and power consumption of spin-wait loops. + +
emmintrin.h
+
+ + SSE2 + General Support + + Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy. + +
emmintrin.h
+
+ + SSE2 + General Support + + Perform a serializing operation on all load-from-memory instructions that were issued prior to this instruction. Guarantees that every load instruction that precedes, in program order, is globally visible before any load instruction which follows the fence in program order. + +
emmintrin.h
+
+ + SSE2 + General Support + + Perform a serializing operation on all load-from-memory and store-to-memory instructions that were issued prior to this instruction. Guarantees that every memory access that precedes, in program order, the memory fence instruction is globally visible before any memory instruction which follows the fence in program order. + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add 64-bit integers "a" and "b", and store the result in "dst". + +dst[63:0] := a[63:0] + b[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Probability/Statistics + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Probability/Statistics + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply the low unsigned 32-bit integers from "a" and "b", and store the unsigned 64-bit result in "dst". + +dst[63:0] := a[31:0] * b[31:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + Miscellaneous + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce two unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst". + +FOR j := 0 to 15 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR +FOR j := 0 to 1 + i := j*64 + dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + + tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56] + dst[i+63:i+16] := 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract 64-bit integer "b" from 64-bit integer "a", and store the result in "dst". + +dst[63:0] := a[63:0] - b[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Arithmetic + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[127:0] := (a[127:0] AND b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Logical + + + Compute the bitwise NOT of 128 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[127:0] := ((NOT a[127:0]) AND b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Logical + + + Compute the bitwise OR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[127:0] := (a[127:0] OR b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Logical + + + Compute the bitwise XOR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[127:0] := (a[127:0] XOR b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtb instruction with the order of the operands switched. + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := ( a[i+7:i] < b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtw instruction with the order of the operands switched. + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ( a[i+15:i] < b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtd instruction with the order of the operands switched. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + m := j*64 + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + + Convert the 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + + Convert the 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + + Convert the 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + m := j*64 + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper elements of "dst". + +dst[31:0] := a[31:0] +dst[127:32] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element. + +dst[63:0] := a[63:0] +dst[127:64] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element. + +dst[63:0] := a[63:0] +dst[127:64] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy the lower 64-bit integer in "a" to "dst". + +dst[63:0] := a[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Convert + + Copy the lower 64-bit integer in "a" to "dst". + +dst[63:0] := a[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 +dst[79:64] := e4 +dst[95:80] := e5 +dst[111:96] := e6 +dst[127:112] := e7 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 +dst[71:64] := e8 +dst[79:72] := e9 +dst[87:80] := e10 +dst[95:88] := e11 +dst[103:96] := e12 +dst[111:104] := e13 +dst[119:112] := e14 +dst[127:120] := e15 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + Broadcast 64-bit integer "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastq". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastd". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + Broadcast 16-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastw". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastb". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + Set packed 64-bit integers in "dst" with the supplied values in reverse order. + +dst[63:0] := e1 +dst[127:64] := e0 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e3 +dst[63:32] := e2 +dst[95:64] := e1 +dst[127:96] := e0 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e7 +dst[31:16] := e6 +dst[47:32] := e5 +dst[63:48] := e4 +dst[79:64] := e3 +dst[95:80] := e2 +dst[111:96] := e1 +dst[127:112] := e0 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e15 +dst[15:8] := e14 +dst[23:16] := e13 +dst[31:24] := e12 +dst[39:32] := e11 +dst[47:40] := e10 +dst[55:48] := e9 +dst[63:56] := e8 +dst[71:64] := e7 +dst[79:72] := e6 +dst[87:80] := e5 +dst[95:88] := e4 +dst[103:96] := e3 +dst[111:104] := e2 +dst[119:112] := e1 +dst[127:120] := e0 + +
emmintrin.h
+
+ + Integer + SSE2 + Set + Return vector of type __m128i with all elements set to zero. + +dst[MAX:0] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Load + + Load 64-bit integer from memory into the first element of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[MAX:64] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Load + + Load 128-bits of integer data from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
emmintrin.h
+
+ + Integer + SSE2 + Load + + Load 128-bits of integer data from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + + Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint. "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*8 + IF mask[i+7] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 128-bits of integer data from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 128-bits of integer data from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 64-bit integer from the first element of "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 128-bits of integer data from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 32-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated. + +MEM[mem_addr+31:mem_addr] := a[31:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Store + + + Store 64-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Miscellaneous + + Copy the lower 64-bit integer in "a" to "dst". + +dst[63:0] := a[63:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Move + + Copy the 64-bit integer "a" to the lower element of "dst", and zero the upper element. + +dst[63:0] := a[63:0] +dst[127:64] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Move + + Copy the lower 64-bit integer in "a" to the lower element of "dst", and zero the upper element. + +dst[63:0] := a[63:0] +dst[127:64] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + + +dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[15:0] := (a[127:0] >> (imm8[2:0] * 16))[15:0] +dst[31:16] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[2:0]*16 +dst[sel+15:sel] := i[15:0] + + +
emmintrin.h
+
+ + Integer + SSE2 + Miscellaneous + + Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[j] := a[i+7] +ENDFOR +dst[MAX:16] := 0 + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst". + +dst[63:0] := a[63:0] +dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst". + +dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Integer + SSE2 + Swizzle + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := a[63:0] + b[63:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := a[63:0] / b[63:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Special Math Functions + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := MAX(a[63:0], b[63:0]) +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Special Math Functions + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := MIN(a[63:0], b[63:0]) +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := a[63:0] * b[63:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Elementary Math Functions + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := SQRT(b[63:0]) +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Elementary Math Functions + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := a[63:0] - b[63:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Arithmetic + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Logical + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Logical + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Logical + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Logical + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] == b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] < b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] <= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] > b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] >= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] != NaN AND b[63:0] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] == NaN OR b[63:0] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] != b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := !(a[63:0] < b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := !(a[63:0] <= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := !(a[63:0] > b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := !(a[63:0] >= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] == b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] < b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] <= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] > b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] >= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] != b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := !(a[i+63:i] < b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := !(a[i+63:i] <= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := !(a[i+63:i] > b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := !(a[i+63:i] >= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). + +RETURN ( a[63:0] == b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). + +RETURN ( a[63:0] < b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). + +RETURN ( a[63:0] <= b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). + +RETURN ( a[63:0] > b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). + +RETURN ( a[63:0] >= b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). + +RETURN ( a[63:0] != b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] == b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] < b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] <= b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] > b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] >= b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Compare + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + +RETURN ( a[63:0] != b[63:0] ) ? 1 : 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + + +dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +dst[127:32] := a[127:31] +dst[MAX:64] := 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + Copy the lower double-precision (64-bit) floating-point element of "a" to "dst". + dst[63:0] := a[63:0] + +
emmintrin.h
+
+ + Floating Point + SSE2 + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + + +dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:64] := 0 + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + Copy double-precision (64-bit) floating-point element "a" to the lower element of "dst", and zero the upper element. + +dst[63:0] := a[63:0] +dst[127:64] := 0 + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[63:0] := e1 +dst[127:64] := e0 + +
emmintrin.h
+
+ + Floating Point + SSE2 + Set + + Return vector of type __m128d with all elements set to zero. + +dst[MAX:0] := 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + +
emmintrin.h
+
+ + + + + Floating Point + SSE2 + Load + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + Load 2 double-precision (64-bit) floating-point elements from memory into "dst" in reverse order. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[63:0] := MEM[mem_addr+127:mem_addr+64] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + Load a double-precision (64-bit) floating-point element from memory into the lower of "dst", and zero the upper element. "mem_addr" does not need to be aligned on any particular boundary. + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + + Load a double-precision (64-bit) floating-point element from memory into the upper element of "dst", and copy the lower element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[63:0] := a[63:0] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Load + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst", and copy the upper element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store the lower double-precision (64-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[63:0] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[63:0] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store 2 double-precision (64-bit) floating-point elements from "a" into memory in reverse order. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[127:64] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store the upper double-precision (64-bit) floating-point element from "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Store + + + Store the lower double-precision (64-bit) floating-point element from "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Miscellaneous + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:2] := 0 + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst". + +dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Move + + + Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := b[63:0] +dst[127:64] := a[127:64] + + +
emmintrin.h
+
+ + Floating Point + SSE2 + Cast + + Cast vector of type __m128d to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Cast + + Cast vector of type __m128d to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + SSE2 + Cast + + Cast vector of type __m128 to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + Integer + SSE2 + Cast + + Cast vector of type __m128 to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + SSE2 + Cast + + Cast vector of type __m128i to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + SSE2 + Cast + + Cast vector of type __m128i to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
emmintrin.h
+
+ + Floating Point + Convert + + Convert the half-precision (16-bit) floating-point value "a" to a single-precision (32-bit) floating-point value, and store the result in "dst". + +dst[31:0] := Convert_FP16_To_FP32(a[15:0]) + +
emmintrin.h
+
+ + Floating Point + Convert + + + Convert the single-precision (32-bit) floating-point value "a" to a half-precision (16-bit) floating-point value, and store the result in "dst". + +dst[15:0] := Convert_FP32_To_FP16(a[31:0]) + +
emmintrin.h
+
+ + Floating Point + FP16C + Convert + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:128] := 0 + + +
emmintrin.h
+
+ + Floating Point + FP16C + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 3 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
emmintrin.h
+
+ + + Floating Point + SSE3 + Arithmetic + + + Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF (j is even) + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + b[i+31:i] + FI +ENDFOR + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Arithmetic + + + Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF (j is even) + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + b[i+63:i] + FI +ENDFOR + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Arithmetic + + + Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[127:64] + a[63:0] +dst[127:64] := b[127:64] + b[63:0] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Arithmetic + + + Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[63:0] - a[127:64] +dst[127:64] := b[63:0] - b[127:64] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Arithmetic + + + Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] + + +
pmmintrin.h
+
+ + Integer + SSE3 + Load + + Load 128-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm_loadu_si128" when the data crosses a cache line boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
pmmintrin.h
+
+ + MONITOR + General Support + + + + Arm address monitoring hardware using the address specified in "p". A store to an address within the specified address range triggers the monitoring hardware. Specify optional extensions in "extensions", and optional hints in "hints". + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Move + + Duplicate the low double-precision (64-bit) floating-point element from "a", and store the results in "dst". + + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Load + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + + +tmp[63:0] := MEM[mem_addr+63:mem_addr] +tmp[127:64] := MEM[mem_addr+63:mem_addr] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Move + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] + + +
pmmintrin.h
+
+ + Floating Point + SSE3 + Move + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] + + +
pmmintrin.h
+
+ + MONITOR + General Support + + + Hint to the processor that it can enter an implementation-dependent-optimized state while waiting for an event or store operation to the address range specified by MONITOR. + +
pmmintrin.h
+
+ + + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ABS(a[i+7:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := ABS(a[i+7:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ABS(a[i+15:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ABS(a[i+15:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ABS(a[i+31:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Special Math Functions + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ABS(a[i+31:i]) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Swizzle + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[3:0] := b[i+3:i] + dst[i+7:i] := a[index*8+7:index*8] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Swizzle + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[2:0] := b[i+2:i] + dst[i+7:i] := a[index*8+7:index*8] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Miscellaneous + + + + Concatenate 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst". + +tmp[255:0] := ((a[127:0] << 128) OR b[127:0]) >> (count[7:0]*8) +dst[127:0] := tmp[127:0] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Miscellaneous + + + + Concatenate 8-byte blocks in "a" and "b" into a 16-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst". + +tmp[127:0] := ((a[63:0] << 64) OR b[63:0]) >> (count[7:0]*8) +dst[63:0] := tmp[63:0] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[31:16] + a[15:0] +dst[31:16] := a[63:48] + a[47:32] +dst[47:32] := a[95:80] + a[79:64] +dst[63:48] := a[127:112] + a[111:96] +dst[79:64] := b[31:16] + b[15:0] +dst[95:80] := b[63:48] + b[47:32] +dst[111:96] := b[95:80] + b[79:64] +dst[127:112] := b[127:112] + b[111:96] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[31:16] + a[15:0]) +dst[31:16] = Saturate_To_Int16(a[63:48] + a[47:32]) +dst[47:32] = Saturate_To_Int16(a[95:80] + a[79:64]) +dst[63:48] = Saturate_To_Int16(a[127:112] + a[111:96]) +dst[79:64] = Saturate_To_Int16(b[31:16] + b[15:0]) +dst[95:80] = Saturate_To_Int16(b[63:48] + b[47:32]) +dst[111:96] = Saturate_To_Int16(b[95:80] + b[79:64]) +dst[127:112] = Saturate_To_Int16(b[127:112] + b[111:96]) + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[31:16] + a[15:0] +dst[31:16] := a[63:48] + a[47:32] +dst[47:32] := b[31:16] + b[15:0] +dst[63:48] := b[63:48] + b[47:32] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := b[63:32] + b[31:0] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[31:16] + a[15:0]) +dst[31:16] = Saturate_To_Int16(a[63:48] + a[47:32]) +dst[47:32] = Saturate_To_Int16(b[31:16] + b[15:0]) +dst[63:48] = Saturate_To_Int16(b[63:48] + b[47:32]) + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[15:0] - a[31:16] +dst[31:16] := a[47:32] - a[63:48] +dst[47:32] := a[79:64] - a[95:80] +dst[63:48] := a[111:96] - a[127:112] +dst[79:64] := b[15:0] - b[31:16] +dst[95:80] := b[47:32] - b[63:48] +dst[111:96] := b[79:64] - b[95:80] +dst[127:112] := b[111:96] - b[127:112] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[15:0] - a[31:16]) +dst[31:16] = Saturate_To_Int16(a[47:32] - a[63:48]) +dst[47:32] = Saturate_To_Int16(a[79:64] - a[95:80]) +dst[63:48] = Saturate_To_Int16(a[111:96] - a[127:112]) +dst[79:64] = Saturate_To_Int16(b[15:0] - b[31:16]) +dst[95:80] = Saturate_To_Int16(b[47:32] - b[63:48]) +dst[111:96] = Saturate_To_Int16(b[79:64] - b[95:80]) +dst[127:112] = Saturate_To_Int16(b[111:96] - b[127:112]) + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[15:0] - a[31:16] +dst[31:16] := a[47:32] - a[63:48] +dst[47:32] := b[15:0] - b[31:16] +dst[63:48] := b[47:32] - b[63:48] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := b[31:0] - b[63:32] + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[15:0] - a[31:16]) +dst[31:16] = Saturate_To_Int16(a[47:32] - a[63:48]) +dst[47:32] = Saturate_To_Int16(b[15:0] - b[31:16]) +dst[63:48] = Saturate_To_Int16(b[47:32] - b[63:48]) + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 15 + i := j*8 + IF b[i+7:i] < 0 + dst[i+7:i] := NEG(a[i+7:i]) + ELSE IF b[i+7:i] = 0 + dst[i+7:i] := 0 + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 7 + i := j*16 + IF b[i+15:i] < 0 + dst[i+15:i] := NEG(a[i+15:i]) + ELSE IF b[i+15:i] = 0 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 3 + i := j*32 + IF b[i+31:i] < 0 + dst[i+31:i] := NEG(a[i+31:i]) + ELSE IF b[i+31:i] = 0 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 7 + i := j*8 + IF b[i+7:i] < 0 + dst[i+7:i] := NEG(a[i+7:i]) + ELSE IF b[i+7:i] = 0 + dst[i+7:i] := 0 + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 3 + i := j*16 + IF b[i+15:i] < 0 + dst[i+15:i] := NEG(a[i+15:i]) + ELSE IF b[i+15:i] = 0 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + Integer + SSSE3 + Arithmetic + + + Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 1 + i := j*32 + IF b[i+31:i] < 0 + dst[i+31:i] := NEG(a[i+31:i]) + ELSE IF b[i+31:i] = 0 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + +
tmmintrin.h
+
+ + + Floating Point + SSE4.1 + Swizzle + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[j%8] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Swizzle + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Swizzle + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Swizzle + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + + Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF mask[i+7] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + + Blend packed 16-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[j%8] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Arithmetic + + + + Conditionally multiply the packed double-precision (64-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 1 + i := j*64 + IF imm8[(4+j)%8] + temp[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + temp[i+63:i] := 0 + FI + ENDFOR + + sum[63:0] := temp[127:64] + temp[63:0] + + FOR j := 0 to 1 + i := j*64 + IF imm8[j%8] + tmpdst[i+63:i] := sum[63:0] + ELSE + tmpdst[i+63:i] := 0 + FI + ENDFOR + RETURN tmpdst[127:0] +} + +dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Arithmetic + + + + Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 3 + i := j*32 + IF imm8[(4+j)%8] + temp[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + temp[i+31:i] := 0 + FI + ENDFOR + + sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) + + FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + tmpdst[i+31:i] := sum[31:0] + ELSE + tmpdst[i+31:i] := 0 + FI + ENDFOR + RETURN tmpdst[127:0] +} + +dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Swizzle + + + Extract a single-precision (32-bit) floating-point element from "a", selected with "imm8", and store the result in "dst". + +dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + Extract an 8-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[7:0] := (a[127:0] >> (imm8[3:0] * 8))[7:0] +dst[31:8] := 0 + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + Extract a 32-bit integer from "a", selected with "imm8", and store the result in "dst". + +dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + Extract a 64-bit integer from "a", selected with "imm8", and store the result in "dst". + +dst[63:0] := (a[127:0] >> (imm8[0] * 64))[63:0] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Swizzle + + + + Copy "a" to "tmp", then insert a single-precision (32-bit) floating-point element from "b" into "tmp" using the control in "imm8". Store "tmp" to "dst" using the mask in "imm8" (elements are zeroed out when the corresponding bit is set). + +tmp2[127:0] := a[127:0] +CASE (imm8[7:6]) of +0: tmp1[31:0] := b[31:0] +1: tmp1[31:0] := b[63:32] +2: tmp1[31:0] := b[95:64] +3: tmp1[31:0] := b[127:96] +ESAC +CASE (imm8[5:4]) of +0: tmp2[31:0] := tmp1[31:0] +1: tmp2[63:32] := tmp1[31:0] +2: tmp2[95:64] := tmp1[31:0] +3: tmp2[127:96] := tmp1[31:0] +ESAC +FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := tmp2[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + + Copy "a" to "dst", and insert the lower 8-bit integer from "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[3:0]*8 +dst[sel+7:sel] := i[7:0] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + + Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[1:0]*32 +dst[sel+31:sel] := i[31:0] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Swizzle + + + + Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[0]*64 +dst[sel+63:sel] := i[63:0] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Special Math Functions + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Compare + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + l := j*16 + dst[l+15:l] := SignExtend(a[i+7:i]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[i+31:i] := SignExtend(a[k+7:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[i+63:i] := SignExtend(a[k+7:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[i+31:i] := SignExtend(a[k+15:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[i+63:i] := SignExtend(a[k+15:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[i+63:i] := SignExtend(a[k+31:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + l := j*16 + dst[l+15:l] := ZeroExtend(a[i+7:i]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[i+31:i] := ZeroExtend(a[k+7:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[i+63:i] := ZeroExtend(a[k+7:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[i+31:i] := ZeroExtend(a[k+15:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[i+63:i] := ZeroExtend(a[k+15:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Convert + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[i+63:i] := ZeroExtend(a[k+31:k]) +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Arithmetic + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Arithmetic + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] +ENDFOR + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value. + +IF (a[127:0] AND b[127:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[127:0]) AND b[127:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value. + +IF (a[127:0] AND b[127:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[127:0]) AND b[127:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF (a[127:0] AND b[127:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[127:0]) AND b[127:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and return 1 if the result is zero, otherwise return 0. + +IF (a[127:0] AND mask[127:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +RETURN ZF + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "mask", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF (a[127:0] AND mask[127:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[127:0]) AND mask[127:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Logical + + Compute the bitwise NOT of "a" and then AND with a 128-bit vector containing all 1's, and return 1 if the result is zero, otherwise return 0. + +FOR j := 0 to 127 + tmp[j] := 1 +ENDFOR + +IF ((NOT a[127:0]) AND tmp[127:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst". + [round_note] + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst". + [round_note] + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + + Round the lower double-precision (64-bit) floating-point element in "b" using the "rounding" parameter, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := ROUND(b[63:0]) +dst[127:64] := a[127:64] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the lower double-precision (64-bit) floating-point element in "b" down to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := FLOOR(b[63:0]) +dst[127:64] := a[127:64] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the lower double-precision (64-bit) floating-point element in "b" up to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := CEIL(b[63:0]) +dst[127:64] := a[127:64] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + + Round the lower single-precision (32-bit) floating-point element in "b" using the "rounding" parameter, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := ROUND(b[31:0]) +dst[127:32] := a[127:32] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the lower single-precision (32-bit) floating-point element in "b" down to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := FLOOR(b[31:0]) +dst[127:32] := a[127:32] + + +
smmintrin.h
+
+ + Floating Point + SSE4.1 + Special Math Functions + + + Round the lower single-precision (32-bit) floating-point element in "b" up to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := CEIL(b[31:0]) +dst[127:32] := a[127:32] + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Miscellaneous + + Horizontally compute the minimum amongst the packed unsigned 16-bit integers in "a", store the minimum and index in "dst", and zero the remaining bits in "dst". + +index[2:0] := 0 +min[15:0] := a[15:0] +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] < min[15:0] + index[2:0] := j + min[15:0] := a[i+15:i] + FI +ENDFOR +dst[15:0] := min[15:0] +dst[18:16] := index[2:0] +dst[127:19] := 0 + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Arithmetic + Miscellaneous + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Eight SADs are performed using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at on the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8". + +MPSADBW(a[127:0], b[127:0], imm8[2:0]) { + a_offset := imm8[2]*32 + b_offset := imm8[1:0]*32 + FOR j := 0 to 7 + i := j*8 + k := a_offset+i + l := b_offset + tmp[i*2+15:i*2] := ABS(a[k+7:k] - b[l+7:l]) + ABS(a[k+15:k+8] - b[l+15:l+8]) + ABS(a[k+23:k+16] - b[l+23:l+16]) + ABS(a[k+31:k+24] - b[l+31:l+24]) + ENDFOR + RETURN tmp[127:0] +} + +dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0]) + + +
smmintrin.h
+
+ + Integer + SSE4.1 + Load + + Load 128-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + +
smmintrin.h
+
+ + + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated mask in "dst". + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + IF bInvalid // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +IF imm8[6] // byte / word mask + FOR i := 0 to UpperBound + j := i*size + IF IntRes2[i] + dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF) + ELSE + dst[j+size-1:j] := 0 + FI + ENDFOR +ELSE // bit mask + dst[UpperBound:0] := IntRes[UpperBound:0] + dst[127:UpperBound+1] := 0 +FI + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated index in "dst". + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + IF bInvalid // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +IF imm8[6] // most significant bit + tmp := UpperBound + dst := tmp + DO WHILE ((tmp >= 0) AND a[tmp] = 0) + tmp := tmp - 1 + dst := tmp + OD +ELSE // least significant bit + tmp := 0 + dst := tmp + DO WHILE ((tmp <= UpperBound) AND a[tmp] = 0) + tmp := tmp + 1 + dst := tmp + OD +FI + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if any character in "b" was null, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +bInvalid := 0 +FOR j := 0 to UpperBound + n := j*size + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI +ENDFOR + +dst := bInvalid + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if the resulting mask was non-zero, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + IF bInvalid // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := (IntRes2 != 0) + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if any character in "a" was null, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +aInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI +ENDFOR + +dst := aInvalid + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns bit 0 of the resulting bit mask. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + IF bInvalid // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := IntRes2[0] + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 + aInvalid := 1 + FI + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF b[n+size-1:n] == 0 + bInvalid := 1 + FI + IF bInvalid // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := (IntRes2 == 0) AND bInvalid + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated mask in "dst". + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF i == la + aInvalid := 1 + FI + IF j == lb + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF i >= lb // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +IF imm8[6] // byte / word mask + FOR i := 0 to UpperBound + j := i*size + IF IntRes2[i] + dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF) + ELSE + dst[j+size-1:j] := 0 + FI + ENDFOR +ELSE // bit mask + dst[UpperBound:0] := IntRes2[UpperBound:0] + dst[127:UpperBound+1] := 0 +FI + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated index in "dst". + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF i == la + aInvalid := 1 + FI + IF j == lb + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF i >= lb // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +IF imm8[6] // most significant bit + tmp := UpperBound + dst := tmp + DO WHILE ((tmp >= 0) AND IntRes2[tmp] = 0) + tmp := tmp - 1 + dst := tmp + OD +ELSE // least significant bit + tmp := 0 + dst := tmp + DO WHILE ((tmp <= UpperBound) AND IntRes2[tmp] = 0) + tmp := tmp + 1 + dst := tmp + OD +FI + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if any character in "b" was null, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +dst := (lb <= UpperBound) + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if the resulting mask was non-zero, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF i == la + aInvalid := 1 + FI + IF j == lb + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF + 0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR + 1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR + 2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR + 3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF i >= lb // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := (IntRes2 != 0) + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if any character in "a" was null, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +dst := (la <= UpperBound) + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns bit 0 of the resulting bit mask. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF i == la + aInvalid := 1 + FI + IF j == lb + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF + 0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR + 1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR + 2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR + 3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF i >= lb // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := IntRes2[0] + + +
nmmintrin.h
+
+ + SSE4.2 + String Compare + + + + + + Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise. + [strcmp_note] + + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 + +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes[i][j] := (a[m+size-1:m] == b[n+size-1:n]) + + // invalidate characters after EOS + IF i == la + aInvalid := 1 + FI + IF j == lb + bInvalid := 1 + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 1: // ranges + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 0 + FI + 2: // equal each + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 0 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + 3: // equal ordered + IF (!aInvalid && bInvalid) + BoolRes[i][j] := 0 + ELSE IF (aInvalid && !bInvalid) + BoolRes[i][j] := 1 + ELSE If (aInvalid && bInvalid) + BoolRes[i][j] := 1 + FI + ESAC + ENDFOR +ENDFOR + +// aggregate results +CASE (imm8[3:2]) OF + 0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound + IntRes1[i] := IntRes1[i] OR BoolRes[i][j] + ENDFOR + ENDFOR + 1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound, j += 2 + IntRes1[i] := IntRes1[i] OR (BoolRes[i][j] AND BoolRes[i][j+1]) + ENDFOR + ENDFOR + 2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes[i][i] + ENDFOR + 3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i + IntRes1[i] := IntRes1[i] AND BoolRes[k][j] + k++ + ENDFOR + ENDFOR +ESAC + +// optionally negate results +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid + IF i >= lb // invalid, don't negate + IntRes2[i] := IntRes1[i] + ELSE // valid, negate + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // negate all + IntRes2[i] := -1 XOR IntRes1[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR + +// output +dst := (IntRes2 == 0) AND (lb > UpperBound) + + +
nmmintrin.h
+
+ + Integer + SSE4.2 + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ( a[i+63:i] > b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + +
nmmintrin.h
+
+ + Integer + SSE4.2 + Cryptography + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 8-bit integer "v", and stores the result in "dst". + +tmp1[7:0] := v[0:7] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[39:0] := tmp1[7:0] << 32 +tmp4[39:0] := tmp2[31:0] << 8 +tmp5[39:0] := tmp3[39:0] XOR tmp4[39:0] +tmp6[31:0] := tmp5[39:0] MOD2 0x11EDC6F41 +dst[31:0] := tmp6[0:31] // bit reflection + + +
nmmintrin.h
+
+ + Integer + SSE4.2 + Cryptography + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 16-bit integer "v", and stores the result in "dst". + +tmp1[15:0] := v[0:15] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[47:0] := tmp1[15:0] << 32 +tmp4[47:0] := tmp2[31:0] << 16 +tmp5[47:0] := tmp3[47:0] XOR tmp4[47:0] +tmp6[31:0] := tmp5[47:0] MOD2 0x11EDC6F41 +dst[31:0] := tmp6[0:31] // bit reflection + + +
nmmintrin.h
+
+ + Integer + SSE4.2 + Cryptography + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 32-bit integer "v", and stores the result in "dst". + +tmp1[31:0] := v[0:31] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[63:0] := tmp1[31:0] << 32 +tmp4[63:0] := tmp2[31:0] << 32 +tmp5[63:0] := tmp3[63:0] XOR tmp4[63:0] +tmp6[31:0] := tmp5[63:0] MOD2 0x11EDC6F41 +dst[31:0] := tmp6[0:31] // bit reflection + + +
nmmintrin.h
+
+ + Integer + SSE4.2 + Cryptography + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 64-bit integer "v", and stores the result in "dst". + +tmp1[63:0] := v[0:63] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[95:0] := tmp1[63:0] << 32 +tmp4[95:0] := tmp2[31:0] << 64 +tmp5[95:0] := tmp3[95:0] XOR tmp4[95:0] +tmp6[31:0] := tmp5[95:0] MOD2 0x11EDC6F41 +dst[31:0] := tmp6[0:31] // bit reflection + + +
nmmintrin.h
+
+ + Integer + POPCNT + Bit Manipulation + + + Count the number of bits set to 1 in unsigned 32-bit integer "a", and return that count in "dst". + + +dst := 0 +FOR i := 0 to 31 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + +
nmmintrin.h
+
+ + Integer + POPCNT + Bit Manipulation + + + Count the number of bits set to 1 in unsigned 64-bit integer "a", and return that count in "dst". + + +dst := 0 +FOR i := 0 to 63 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + +
nmmintrin.h
+
+ + + Integer + AES + Cryptography + + + Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + state := a +a[127:0] := ShiftRows(a[127:0]) +a[127:0] := SubBytes(a[127:0]) +a[127:0] := MixColumns(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + +
wmmintrin.h
+
+ + Integer + AES + Cryptography + + + Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + state := a +a[127:0] := ShiftRows(a[127:0]) +a[127:0] := SubBytes(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + +
wmmintrin.h
+
+ + Integer + AES + Cryptography + + + Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + state := a +a[127:0] := InvShiftRows(a[127:0]) +a[127:0] := InvSubBytes(a[127:0]) +a[127:0] := InvMixColumns(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + +
wmmintrin.h
+
+ + Integer + AES + Cryptography + + + Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + state := a +a[127:0] := InvShiftRows(a[127:0]) +a[127:0] := InvSubBytes(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + +
wmmintrin.h
+
+ + Integer + AES + Cryptography + + Perform the InvMixColumns transformation on "a" and store the result in "dst". + +dst[127:0] := InvMixColumns(a[127:0]) + + +
wmmintrin.h
+
+ + Integer + AES + Cryptography + + + Assist in expanding the AES cipher key by computing steps towards generating a round key for encryption cipher using data from "a" and an 8-bit round constant specified in "imm8", and store the result in "dst". + + +X3[31:0] := a[127:96] +X2[31:0] := a[95:64] +X1[31:0] := a[63:32] +X0[31:0] := a[31:0] +RCON[31:0] := ZeroExtend(imm8[7:0]); +dst[31:0] := SubWord(X1) +dst[63:32] := RotWord(SubWord(X1)) XOR RCON; +dst[95:64] := SubWord(X3) +dst[127:96] := RotWord(SubWord(X3)) XOR RCON; + + +
wmmintrin.h
+
+ + Integer + PCLMULQDQ + Application-Targeted + + + + Perform a carry-less multiplication of two 64-bit integers, selected from "a" and "b" according to "imm8", and store the results in "dst". + + +IF (imm8[0] = 0) + TEMP1 := a[63:0]; +ELSE + TEMP1 := a[127:64]; +FI +IF (imm8[4] = 0) + TEMP2 := b[63:0]; +ELSE + TEMP2 := b[127:64]; +FI + +FOR i := 0 to 63 + TEMP[i] := (TEMP1[0] and TEMP2[i]); + FOR j := 1 to i + TEMP [i] := TEMP [i] XOR (TEMP1[j] AND TEMP2[i-j]) + ENDFOR + dst[i] := TEMP[i]; +ENDFOR +FOR i := 64 to 127 + TEMP [i] := 0; + FOR j := (i - 63) to 63 + TEMP [i] := TEMP [i] XOR (TEMP1[j] AND TEMP2[i-j]) + ENDFOR + dst[i] := TEMP[i]; +ENDFOR +dst[127] := 0 + + +
wmmintrin.h
+
+ + + Floating Point + AVX + Arithmetic + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF (j is even) + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF (j is even) + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[j%8] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + + Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 3 + i := j*32 + IF imm8[(4+j)%8] + temp[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + temp[i+31:i] := 0 + FI + ENDFOR + + sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) + + FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + tmpdst[i+31:i] := sum[31:0] + ELSE + tmpdst[i+31:i] := 0 + FI + ENDFOR + RETURN tmpdst[127:0] +} + +dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) +dst[255:128] := DP(a[255:128], b[255:128], imm8[7:0]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[127:64] + a[63:0] +dst[127:64] := b[127:64] + b[63:0] +dst[191:128] := a[255:192] + a[191:128] +dst[255:192] := b[255:192] + b[191:128] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] +dst[159:128] := a[191:160] + a[159:128] +dst[191:160] := a[255:224] + a[223:192] +dst[223:192] := b[191:160] + b[159:128] +dst[255:224] := b[255:224] + b[223:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[63:0] - a[127:64] +dst[127:64] := b[63:0] - b[127:64] +dst[191:128] := a[191:128] - a[255:192] +dst[255:192] := b[191:128] - b[255:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Horizontally subtract adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] +dst[159:128] := a[159:128] - a[191:160] +dst[191:160] := a[223:192] - a[255:224] +dst[223:192] := b[159:128] - b[191:160] +dst[255:224] := b[223:192] - b[255:224] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst". + +dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Arithmetic + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +dst[63:0] := ( a[63:0] OP b[63:0] ) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +dst[31:0] := ( a[31:0] OP b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + m := j*64 + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Extract an 8-bit integer from "a", selected with "index", and store the result in "dst". + +dst[7:0] := (a[255:0] >> (index * 8))[7:0] + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Extract a 16-bit integer from "a", selected with "index", and store the result in "dst". + +dst[15:0] := (a[255:0] >> (index * 16))[15:0] + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + Extract a 32-bit integer from "a", selected with "index", and store the result in "dst". + +dst[31:0] := (a[255:0] >> (index * 32))[31:0] + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + Extract a 64-bit integer from "a", selected with "index", and store the result in "dst". + +dst[63:0] := (a[255:0] >> (index * 64))[63:0] + +
immintrin.h
+
+ + AVX + General Support + + Zero the contents of all XMM or YMM registers. + +YMM0[MAX:0] := 0 +YMM1[MAX:0] := 0 +YMM2[MAX:0] := 0 +YMM3[MAX:0] := 0 +YMM4[MAX:0] := 0 +YMM5[MAX:0] := 0 +YMM6[MAX:0] := 0 +YMM7[MAX:0] := 0 +IF 64-bit mode + YMM8[MAX:0] := 0 + YMM9[MAX:0] := 0 + YMM10[MAX:0] := 0 + YMM11[MAX:0] := 0 + YMM12[MAX:0] := 0 + YMM13[MAX:0] := 0 + YMM14[MAX:0] := 0 + YMM15[MAX:0] := 0 +FI + + +
immintrin.h
+
+ + AVX + General Support + + Zero the upper 128 bits of all YMM registers; the lower 128-bits of the registers are unmodified. + +YMM0[MAX:128] := 0 +YMM1[MAX:128] := 0 +YMM2[MAX:128] := 0 +YMM3[MAX:128] := 0 +YMM4[MAX:128] := 0 +YMM5[MAX:128] := 0 +YMM6[MAX:128] := 0 +YMM7[MAX:128] := 0 +IF 64-bit mode + YMM8[MAX:128] := 0 + YMM9[MAX:128] := 0 + YMM10[MAX:128] := 0 + YMM11[MAX:128] := 0 + YMM12[MAX:128] := 0 + YMM13[MAX:128] := 0 + YMM14[MAX:128] := 0 + YMM15[MAX:128] := 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], b[1:0]) +dst[63:32] := SELECT4(a[127:0], b[33:32]) +dst[95:64] := SELECT4(a[127:0], b[65:64]) +dst[127:96] := SELECT4(a[127:0], b[97:96]) +dst[159:128] := SELECT4(a[255:128], b[129:128]) +dst[191:160] := SELECT4(a[255:128], b[161:160]) +dst[223:192] := SELECT4(a[255:128], b[193:192]) +dst[255:224] := SELECT4(a[255:128], b[225:224]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], b[1:0]) +dst[63:32] := SELECT4(a[127:0], b[33:32]) +dst[95:64] := SELECT4(a[127:0], b[65:64]) +dst[127:96] := SELECT4(a[127:0], b[97:96]) +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +IF (b[1] == 0) dst[63:0] := a[63:0] +IF (b[1] == 1) dst[63:0] := a[127:64] +IF (b[65] == 0) dst[127:64] := a[63:0] +IF (b[65] == 1) dst[127:64] := a[127:64] +IF (b[129] == 0) dst[191:128] := a[191:128] +IF (b[129] == 1) dst[191:128] := a[255:192] +IF (b[193] == 0) dst[255:192] := a[191:128] +IF (b[193] == 1) dst[255:192] := a[255:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst". + +IF (b[1] == 0) dst[63:0] := a[63:0] +IF (b[1] == 1) dst[63:0] := a[127:64] +IF (b[65] == 0) dst[127:64] := a[63:0] +IF (b[65] == 1) dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +IF (imm8[0] == 0) dst[63:0] := a[63:0] +IF (imm8[0] == 1) dst[63:0] := a[127:64] +IF (imm8[1] == 0) dst[127:64] := a[63:0] +IF (imm8[1] == 1) dst[127:64] := a[127:64] +IF (imm8[2] == 0) dst[191:128] := a[191:128] +IF (imm8[2] == 1) dst[191:128] := a[255:192] +IF (imm8[3] == 0) dst[255:192] := a[191:128] +IF (imm8[3] == 1) dst[255:192] := a[255:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst". + +IF (imm8[0] == 0) dst[63:0] := a[63:0] +IF (imm8[0] == 1) dst[63:0] := a[127:64] +IF (imm8[1] == 0) dst[127:64] := a[63:0] +IF (imm8[1] == 1) dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Shuffle 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src1, src2, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Shuffle 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src1, src2, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src1, src2, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst". + +tmp[31:0] = MEM[mem_addr+31:mem_addr] +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + Swizzle + + Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst". + +tmp[31:0] = MEM[mem_addr+31:mem_addr] +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + Swizzle + + Broadcast a double-precision (64-bit) floating-point element from memory to all elements of "dst". + +tmp[63:0] = MEM[mem_addr+63:mem_addr] +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + Swizzle + + Broadcast 128 bits from memory (composed of 4 packed single-precision (32-bit) floating-point elements) to all elements of "dst". + +tmp[127:0] = MEM[mem_addr+127:mem_addr] +dst[127:0] := tmp[127:0] +dst[255:128] := tmp[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + Swizzle + + Broadcast 128 bits from memory (composed of 2 packed double-precision (64-bit) floating-point elements) to all elements of "dst". + +tmp[127:0] = MEM[mem_addr+127:mem_addr] +dst[127:0] := tmp[127:0] +dst[255:128] := tmp[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[7:0] of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Copy "a" to "dst", then insert 128 bits from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Copy "a" to "dst", and insert the 8-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index*8 +dst[sel+7:sel] := i[7:0] + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index*16 +dst[sel+15:sel] := i[15:0] + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index*32 +dst[sel+31:sel] := i[31:0] + +
immintrin.h
+
+ + Integer + AVX + Swizzle + + + + Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index*64 +dst[sel+63:sel] := i[63:0] + +
immintrin.h
+
+ + Floating Point + AVX + Load + + Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Integer + AVX + Load + + Load 256-bits of integer data from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Store + + + Store 256-bits of integer data from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Integer + AVX + Load + + Load 256-bits of integer data from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Store + + + Store 256-bits of integer data from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX + Move + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] +dst[159:128] := a[191:160] +dst[191:160] := a[191:160] +dst[223:192] := a[255:224] +dst[255:224] := a[255:224] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Move + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] +dst[159:128] := a[159:128] +dst[191:160] := a[159:128] +dst[223:192] := a[223:192] +dst[255:224] := a[223:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Move + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst". + + +dst[63:0] := a[63:0] +dst[127:64] := a[63:0] +dst[191:128] := a[191:128] +dst[255:192] := a[191:128] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Load + + Load 256-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm256_loadu_si256" when the data crosses a cache line boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Store + + + Store 256-bits of integer data from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst". + [round_note] + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value. + +IF (a[255:0] AND b[255:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[255:0]) AND b[255:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
immintrin.h
+
+ + Integer + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value. + +IF (a[255:0] AND b[255:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[255:0]) AND b[255:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
immintrin.h
+
+ + Integer + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF (a[255:0] AND b[255:0] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF ((NOT a[255:0]) AND b[255:0] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == tmp[127] == tmp[191] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == tmp[159] == tmp[191] == tmp[223] == tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + +
immintrin.h
+
+ + Floating Point + AVX + Logical + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == tmp[63] == tmp[95] == tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + RETURN 1 +ELSE + RETURN 0 +FI + + +
immintrin.h
+
+ + Floating Point + AVX + Miscellaneous + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Miscellaneous + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + Return vector of type __m256d with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + Return vector of type __m256 with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Set + + Return vector of type __m256i with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 +dst[71:64] := e8 +dst[79:72] := e9 +dst[87:80] := e10 +dst[95:88] := e11 +dst[103:96] := e12 +dst[111:104] := e13 +dst[119:112] := e14 +dst[127:120] := e15 +dst[135:128] := e16 +dst[143:136] := e17 +dst[151:144] := e18 +dst[159:152] := e19 +dst[167:160] := e20 +dst[175:168] := e21 +dst[183:176] := e22 +dst[191:184] := e23 +dst[199:192] := e24 +dst[207:200] := e25 +dst[215:208] := e26 +dst[223:216] := e27 +dst[231:224] := e28 +dst[239:232] := e29 +dst[247:240] := e30 +dst[255:248] := e31 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 +dst[79:64] := e4 +dst[95:80] := e5 +dst[111:96] := e6 +dst[127:112] := e7 +dst[145:128] := e8 +dst[159:144] := e9 +dst[175:160] := e10 +dst[191:176] := e11 +dst[207:192] := e12 +dst[223:208] := e13 +dst[239:224] := e14 +dst[255:240] := e15 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[63:0] := e3 +dst[127:64] := e2 +dst[191:128] := e1 +dst[255:192] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e7 +dst[63:32] := e6 +dst[95:64] := e5 +dst[127:96] := e4 +dst[159:128] := e3 +dst[191:160] := e2 +dst[223:192] := e1 +dst[255:224] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e31 +dst[15:8] := e30 +dst[23:16] := e29 +dst[31:24] := e28 +dst[39:32] := e27 +dst[47:40] := e26 +dst[55:48] := e25 +dst[63:56] := e24 +dst[71:64] := e23 +dst[79:72] := e22 +dst[87:80] := e21 +dst[95:88] := e20 +dst[103:96] := e19 +dst[111:104] := e18 +dst[119:112] := e17 +dst[127:120] := e16 +dst[135:128] := e15 +dst[143:136] := e14 +dst[151:144] := e13 +dst[159:152] := e12 +dst[167:160] := e11 +dst[175:168] := e10 +dst[183:176] := e9 +dst[191:184] := e8 +dst[199:192] := e7 +dst[207:200] := e6 +dst[215:208] := e5 +dst[223:216] := e4 +dst[231:224] := e3 +dst[239:232] := e2 +dst[247:240] := e1 +dst[255:248] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e15 +dst[31:16] := e14 +dst[47:32] := e13 +dst[63:48] := e12 +dst[79:64] := e11 +dst[95:80] := e10 +dst[111:96] := e9 +dst[127:112] := e8 +dst[145:128] := e7 +dst[159:144] := e6 +dst[175:160] := e5 +dst[191:176] := e4 +dst[207:192] := e3 +dst[223:208] := e2 +dst[239:224] := e1 +dst[255:240] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e7 +dst[63:32] := e6 +dst[95:64] := e5 +dst[127:96] := e4 +dst[159:128] := e3 +dst[191:160] := e2 +dst[223:192] := e1 +dst[255:224] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + + + + Set packed 64-bit integers in "dst" with the supplied values in reverse order. + +dst[63:0] := e3 +dst[127:64] := e2 +dst[191:128] := e1 +dst[255:192] := e0 +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Set + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Set + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastb". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + Broadcast 16-bit integer "a" to all all elements of "dst". This intrinsic may generate the "vpbroadcastw". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastd". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Set + + Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastq". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Cast vector of type __m256d to type __m256. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Cast vector of type __m256 to type __m256d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + AVX + Cast + + Casts vector of type __m256 to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + Integer + AVX + Cast + + Casts vector of type __m256d to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m256i to type __m256. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m256i to type __m256d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m256 to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m256d to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX + Cast + + Casts vector of type __m256i to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m128 to type __m256; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m128d to type __m256d; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX + Cast + + Casts vector of type __m128i to type __m256i; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m128 to type __m256; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Cast + + Casts vector of type __m128d to type __m256d; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX + Cast + + Casts vector of type __m128i to type __m256i; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + SSE + General Support + + Return vector of type __m128 with undefined elements. +
immintrin.h
+
+ + Floating Point + SSE2 + General Support + + Return vector of type __m128d with undefined elements. +
immintrin.h
+
+ + Integer + SSE2 + General Support + + Return vector of type __m128i with undefined elements. +
immintrin.h
+
+ + Floating Point + AVX + General Support + + Return vector of type __m256 with undefined elements. +
immintrin.h
+
+ + Floating Point + AVX + General Support + + Return vector of type __m256d with undefined elements. +
immintrin.h
+
+ + Integer + AVX + General Support + + Return vector of type __m256i with undefined elements. +
immintrin.h
+
+ + CLFLUSHOPT + General Support + + Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy. + +
immintrin.h
+
+ + CLWB + General Support + + Write back to memory the cache line that contains "p" from any level of the cache hierarchy in the cache coherence domain. + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + Set packed __m256 vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + Set packed __m256d vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Set + + + Set packed __m256i vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + Set packed __m256 vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Set + + + Set packed __m256d vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX + Set + + + Set packed __m256i vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load two 128-bit values (composed of 4 packed single-precision (32-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Load + + + Load two 128-bit values (composed of 2 packed double-precision (64-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Load + + + Load two 128-bit values (composed of integer data) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store the high and low 128-bit halves (each composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + +
immintrin.h
+
+ + Floating Point + AVX + Store + + + + Store the high and low 128-bit halves (each composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + +
immintrin.h
+
+ + Integer + AVX + Store + + + + Store the high and low 128-bit halves (each composed of integer data) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := ABS(a[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ABS(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ABS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst". + +FOR j := 0 to 1 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + dst[i+127:i] := tmp[127:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Logical + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[255:0] := (a[255:0] AND b[255:0]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Logical + + + Compute the bitwise NOT of 256 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[255:0] := ((NOT a[255:0]) AND b[255:0]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Probability/Statistics + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Probability/Statistics + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Blend packed 16-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[j%8] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + IF mask[i+7] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst". + + +dst[127:0] := a[127:0] +dst[255:128] := a[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst". + + +dst[127:0] := a[127:0] +dst[255:128] := a[127:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ( a[i+63:i] > b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j:= 0 to 7 + i := 32*j + k := 16*j + dst[i+31:i] := SignExtend(a[k+15:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j:= 0 to 3 + i := 64*j + k := 16*j + dst[i+63:i] := SignExtend(a[k+15:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j:= 0 to 3 + i := 64*j + k := 32*j + dst[i+63:i] := SignExtend(a[k+31:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + l := j*16 + dst[l+15:l] := SignExtend(a[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[i+31:i] := SignExtend(a[k+7:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[i+63:i] := SignExtend(a[k+7:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[i+31:i] := ZeroExtend(a[k+15:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j:= 0 to 3 + i := 64*j + k := 16*j + dst[i+63:i] := ZeroExtend(a[k+15:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j:= 0 to 3 + i := 64*j + k := 32*j + dst[i+63:i] := ZeroExtend(a[k+31:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + l := j*16 + dst[l+15:l] := ZeroExtend(a[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[i+31:i] := ZeroExtend(a[k+7:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[i+63:i] := ZeroExtend(a[k+7:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[31:16] + a[15:0] +dst[31:16] := a[63:48] + a[47:32] +dst[47:32] := a[95:80] + a[79:64] +dst[63:48] := a[127:112] + a[111:96] +dst[79:64] := b[31:16] + b[15:0] +dst[95:80] := b[63:48] + b[47:32] +dst[111:96] := b[95:80] + b[79:64] +dst[127:112] := b[127:112] + b[111:96] +dst[143:128] := a[159:144] + a[143:128] +dst[159:144] := a[191:176] + a[175:160] +dst[175:160] := a[223:208] + a[207:192] +dst[191:176] := a[255:240] + a[239:224] +dst[207:192] := b[159:144] + b[143:128] +dst[223:208] := b[191:176] + b[175:160] +dst[239:224] := b[223:208] + b[207:192] +dst[255:240] := b[255:240] + b[239:224] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] +dst[159:128] := a[191:160] + a[159:128] +dst[191:160] := a[255:224] + a[223:192] +dst[223:192] := b[191:160] + b[159:128] +dst[255:224] := b[255:224] + b[223:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally add adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[31:16] + a[15:0]) +dst[31:16] = Saturate_To_Int16(a[63:48] + a[47:32]) +dst[47:32] = Saturate_To_Int16(a[95:80] + a[79:64]) +dst[63:48] = Saturate_To_Int16(a[127:112] + a[111:96]) +dst[79:64] = Saturate_To_Int16(b[31:16] + b[15:0]) +dst[95:80] = Saturate_To_Int16(b[63:48] + b[47:32]) +dst[111:96] = Saturate_To_Int16(b[95:80] + b[79:64]) +dst[127:112] = Saturate_To_Int16(b[127:112] + b[111:96]) +dst[143:128] = Saturate_To_Int16(a[159:144] + a[143:128]) +dst[159:144] = Saturate_To_Int16(a[191:176] + a[175:160]) +dst[175:160] = Saturate_To_Int16( a[223:208] + a[207:192]) +dst[191:176] = Saturate_To_Int16(a[255:240] + a[239:224]) +dst[207:192] = Saturate_To_Int16(b[159:144] + b[143:128]) +dst[223:208] = Saturate_To_Int16(b[191:176] + b[175:160]) +dst[239:224] = Saturate_To_Int16(b[223:208] + b[207:192]) +dst[255:240] = Saturate_To_Int16(b[255:240] + b[239:224]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[15:0] - a[31:16] +dst[31:16] := a[47:32] - a[63:48] +dst[47:32] := a[79:64] - a[95:80] +dst[63:48] := a[111:96] - a[127:112] +dst[79:64] := b[15:0] - b[31:16] +dst[95:80] := b[47:32] - b[63:48] +dst[111:96] := b[79:64] - b[95:80] +dst[127:112] := b[111:96] - b[127:112] +dst[143:128] := a[143:128] - a[159:144] +dst[159:144] := a[175:160] - a[191:176] +dst[175:160] := a[207:192] - a[223:208] +dst[191:176] := a[239:224] - a[255:240] +dst[207:192] := b[143:128] - b[159:144] +dst[223:208] := b[175:160] - b[191:176] +dst[239:224] := b[207:192] - b[223:208] +dst[255:240] := b[239:224] - b[255:240] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] +dst[159:128] := a[159:128] - a[191:160] +dst[191:160] := a[223:192] - a[255:224] +dst[223:192] := b[159:128] - b[191:160] +dst[255:224] := b[223:192] - b[255:224] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0]= Saturate_To_Int16(a[15:0] - a[31:16]) +dst[31:16] = Saturate_To_Int16(a[47:32] - a[63:48]) +dst[47:32] = Saturate_To_Int16(a[79:64] - a[95:80]) +dst[63:48] = Saturate_To_Int16(a[111:96] - a[127:112]) +dst[79:64] = Saturate_To_Int16(b[15:0] - b[31:16]) +dst[95:80] = Saturate_To_Int16(b[47:32] - b[63:48]) +dst[111:96] = Saturate_To_Int16(b[79:64] - b[95:80]) +dst[127:112] = Saturate_To_Int16(b[111:96] - b[127:112]) +dst[143:128]= Saturate_To_Int16(a[143:128] - a[159:144]) +dst[159:144] = Saturate_To_Int16(a[175:160] - a[191:176]) +dst[175:160] = Saturate_To_Int16(a[207:192] - a[223:208]) +dst[191:176] = Saturate_To_Int16(a[239:224] - a[255:240]) +dst[207:192] = Saturate_To_Int16(b[143:128] - b[159:144]) +dst[223:208] = Saturate_To_Int16(b[175:160] - b[191:176]) +dst[239:224] = Saturate_To_Int16(b[207:192] - b[223:208]) +dst[255:240] = Saturate_To_Int16(b[239:224] - b[255:240]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Copy "a" to "dst", then insert 128 bits (composed of integer data) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:64] := 0 +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:64] := 0 +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF mask[i+31] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+31] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + mask[i+63] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + + Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Store + + + + Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX2 + Store + + + + Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX2 + Store + + + + Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX2 + Store + + + + Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 31 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 15 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 31 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 15 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 31 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 15 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 7 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 31 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 15 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 7 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + +Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + + +FOR j := 0 to 31 + i := j*8 + dst[j] := a[i+7] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Eight SADs are performed for each 128-bit lane using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at on the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8". + +MPSADBW(a[127:0], b[127:0], imm8[2:0]) { + a_offset := imm8[2]*32 + b_offset := imm8[1:0]*32 + FOR j := 0 to 7 + i := j*8 + k := a_offset+i + l := b_offset + tmp[i+15:i] := ABS(a[k+7:k] - b[l+7:l]) + ABS(a[k+15:k+8] - b[l+15:l+8]) + ABS(a[k+23:k+16] - b[l+23:l+16]) + ABS(a[k+31:k+24] - b[l+31:l+24]) + ENDFOR + RETURN tmp[127:0] +} + +dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0]) +dst[255:128] := MPSADBW(a[255:128], b[255:128], imm8[5:3]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 15 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 15 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 15 + i := j*16 + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 15 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". + +FOR j := 0 to 7 + i := j*32 + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Logical + + + Compute the bitwise OR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[255:0] := (a[255:0] OR b[255:0]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + + +dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +dst[247:240] := Saturate_Int16_To_Int8 (b[239:224]) +dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) +dst[223:216] := Saturate_Int16_To_UnsignedInt8 (b[191:176]) +dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +dst[247:240] := Saturate_Int16_To_UnsignedInt8 
(b[239:224]) +dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + + Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src1, src2, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX2 + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx". + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce four unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst". + +FOR j := 0 to 31 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR +FOR j := 0 to 3 + i := j*64 + dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56] + dst[i+63:i+16] := 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 8-bit integers in "a" within 128-bit lanes according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[3:0] := b[i+3:i] + dst[i+7:i] := a[index*8+7:index*8] + FI + IF b[128+i+7] == 1 + dst[128+i+7:128+i] := 0 + ELSE + index[3:0] := b[128+i+3:128+i] + dst[128+i+7:128+i] := a[128+index*8+7:128+index*8] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst". + +dst[63:0] := a[63:0] +dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +dst[191:128] := a[191:128] +dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst". + +dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +dst[127:64] := a[127:64] +dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +dst[255:192] := a[255:192] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 31 + i := j*8 + IF b[i+7:i] < 0 + dst[i+7:i] := NEG(a[i+7:i]) + ELSE IF b[i+7:i] = 0 + dst[i+7:i] := 0 + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 15 + i := j*16 + IF b[i+15:i] < 0 + dst[i+15:i] := NEG(a[i+15:i]) + ELSE IF b[i+15:i] = 0 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 7 + i := j*32 + IF b[i+31:i] < 0 + dst[i+31:i] := NEG(a[i+31:i]) + ELSE IF b[i+31:i] = 0 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) +dst[255:128] := a[255:128] << (tmp*8) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) +dst[255:128] := a[255:128] << (tmp*8) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) +dst[255:128] := a[255:128] >> (tmp*8) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) +dst[255:128] := a[255:128] >> (tmp*8) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Load + + Load 256-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Arithmetic + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Logical + + + Compute the bitwise XOR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[255:0] := (a[255:0] XOR b[255:0]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX2 + Swizzle + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + + +dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + FMA + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + + Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start". + +tmp := ZERO_EXTEND_TO_512(a) +dst := ZERO_EXTEND(tmp[start+len-1:start]) + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 7:0 of "control". + +start := control[7:0] +len := control[15:8] +tmp := ZERO_EXTEND_TO_512(a) +dst := ZERO_EXTEND(tmp[start+len-1:start]) + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + + Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start". + +tmp := ZERO_EXTEND_TO_512(a) +dst := ZERO_EXTEND(tmp[start+len-1:start]) + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 7:0 of "control". + +start := control[7:0] +len := control[15:8] +tmp := ZERO_EXTEND_TO_512(a) +dst := ZERO_EXTEND(tmp[start+len-1:start]) + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Extract the lowest set bit from unsigned 32-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a". + +dst := (-a) BITWISE AND a + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Extract the lowest set bit from unsigned 64-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a". + +dst := (-a) BITWISE AND a + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 32-bit integer "a". + +dst := (a - 1) XOR a + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 64-bit integer "a". + +dst := (a - 1) XOR a + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a". + +dst := (a - 1) BITWISE AND a + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a". + +dst := (a - 1) BITWISE AND a + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index". + +n := index[7:0] +dst := a +IF (n < 32) + dst[31:n] := 0 +FI + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index". + +n := index[7:0] +dst := a +IF (n < 64) + dst[63:n] := 0 +FI + + +
immintrin.h
+
+ + INVPCID + OS-Targeted + + + + Invalidate mappings in the Translation Lookaside Buffers (TLBs) and paging-structure caches for the processor context identifier (PCID) specified by "descriptor" based on the invalidation type specified in "type". + The PCID "descriptor" is specified as a 16-byte memory operand (with no alignment restrictions) where bits [11:0] specify the PCID, and bits [127:64] specify the linear address; bits [63:12] are reserved. + The types supported are: + 0) Individual-address invalidation: If "type" is 0, the logical processor invalidates mappings for a single linear address and tagged with the PCID specified in "descriptor", except global translations. The instruction may also invalidate global translations, mappings for other linear addresses, or mappings tagged with other PCIDs. + 1) Single-context invalidation: If "type" is 1, the logical processor invalidates all mappings tagged with the PCID specified in "descriptor" except global translations. In some cases, it may invalidate mappings for other PCIDs as well. + 2) All-context invalidation: If "type" is 2, the logical processor invalidates all mappings tagged with any PCID. + 3) All-context invalidation, retaining global translations: If "type" is 3, the logical processor invalidates all mappings tagged with any PCID except global translations, ignoring "descriptor". The instruction may also invalidate global translations as well. + + +CASE type OF +0: // individual-address invalidation retaining global translations + OP_PCID := descriptor[11:0] + ADDR := descriptor[127:64] + BREAK +1: // single PCID invalidation retaining globals + OP_PCID := descriptor[11:0] + // invalidate all mappings tagged with OP_PCID except global translations + BREAK +2: // all PCID invalidation + // invalidate all mappings tagged with any PCID + BREAK +3: // all PCID invalidation retaining global translations + // invalidate all mappings tagged with any PCID except global translations + BREAK +ESAC + + +
immintrin.h
+
+ + Integer + LZCNT + Bit Manipulation + + Count the number of leading zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 31 +dst := 0 +DO WHILE (tmp >= 0 AND a[tmp] = 0) + tmp := tmp - 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + Integer + LZCNT + Bit Manipulation + + Count the number of leading zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 63 +dst := 0 +DO WHILE (tmp >= 0 AND a[tmp] = 0) + tmp := tmp - 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Deposit contiguous low bits from unsigned 32-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 32 + IF mask[m] = 1 + dst[m] := tmp[k] + k := k + 1 + FI + m := m + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Deposit contiguous low bits from unsigned 64-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 64 + IF mask[m] = 1 + dst[m] := tmp[k] + k := k + 1 + FI + m := m + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Extract bits from unsigned 32-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 32 + IF mask[m] = 1 + dst[k] := tmp[m] + k := k + 1 + FI + m := m + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI2 + Bit Manipulation + + + Extract bits from unsigned 64-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 64 + IF mask[m] = 1 + dst[k] := tmp[m] + k := k + 1 + FI + m := m + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + Compute the bitwise NOT of 32-bit integer "a" and then AND with b, and store the results in dst. + +dst[31:0] := ((NOT a[31:0]) AND b[31:0]) + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + + Compute the bitwise NOT of 64-bit integer "a" and then AND with b, and store the results in dst. + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + +
immintrin.h
+
+ + Integer + BMI2 + Arithmetic + + + + Multiply unsigned 32-bit integers "a" and "b", store the low 32-bits of the result in "dst", and store the high 32-bits in "hi". This does not read or write arithmetic flags. + +dst[31:0] := (a * b)[31:0] +hi[31:0] := (a * b)[63:32] + + +
immintrin.h
+
+ + Integer + BMI2 + Arithmetic + + + + Multiply unsigned 64-bit integers "a" and "b", store the low 64-bits of the result in "dst", and store the high 64-bits in "hi". This does not read or write arithmetic flags. + +dst[63:0] := (a * b)[63:0] +hi[63:0] := (a * b)[127:64] + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 32) AND a[tmp] = 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 64) AND a[tmp] = 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + RTM + General Support + + + Force an RTM abort. The EAX register is updated to reflect an XABORT instruction caused the abort, and the "imm8" parameter will be provided in bits [31:24] of EAX. + Following an RTM abort, the logical processor resumes execution at the fallback address computed through the outermost XBEGIN instruction. + + +IF RTM_ACTIVE = 0 + // nop +ELSE + // restore architectural register state + // discard memory updates performed in transaction + // update EAX with status and imm8 value + RTM_NEST_COUNT := 0 + RTM_ACTIVE := 0 + IF 64-bit Mode + RIP := fallbackRIP + ELSE + EIP := fallbackEIP + FI +FI + + +
immintrin.h
+
+ + RTM + General Support + + + Specify the start of an RTM code region. + If the logical processor was not already in transactional execution, then this call causes the logical processor to transition into transactional execution. + On an RTM abort, the logical processor discards all architectural register and memory updates performed during the RTM execution, restores architectural state, and starts execution beginning at the fallback address computed from the outermost XBEGIN instruction. Return status of ~0 (0xFFFF) if continuing inside transaction; all other codes are aborts. + + +IF RTM_NEST_COUNT < MAX_RTM_NEST_COUNT + RTM_NEST_COUNT := RTM_NEST_COUNT + 1 + IF RTM_NEST_COUNT = 1 + IF 64-bit Mode + fallbackRIP := RIP + SignExtend(IMM) + ELSE IF 32-bit Mode + fallbackEIP := EIP + SignExtend(IMM) + ELSE // 16-bit Mode + fallbackEIP := (EIP + SignExtend(IMM)) AND 0x0000FFFF + FI + + RTM_ACTIVE := 1 + // enter RTM execution, record register state, start tracking memory state + FI +ELSE + // RTM abort (see _xabort) +FI + + +
immintrin.h
+
+ + RTM + General Support + + + Specify the end of an RTM code region. + If this corresponds to the outermost scope, the logical processor will attempt to commit the logical processor state atomically. + If the commit fails, the logical processor will perform an RTM abort. + + +IF RTM_ACTIVE = 1 + RTM_NEST_COUNT := RTM_NEST_COUNT - 1 + IF RTM_NEST_COUNT = 0 + // try to commit transaction + IF fail to commit transaction + // RTM abort (see _xabort) + ELSE + RTM_ACTIVE = 0 + FI + FI +FI + + +
immintrin.h
+
+ + RTM + General Support + + Query the transactional execution status, return 0 if inside a transactionally executing RTM or HLE region, and return 1 otherwise. + +IF (RTM_ACTIVE = 1 OR HLE_ACTIVE = 1) + dst := 0 +ELSE + dst := 1 +FI + + +
immintrin.h
+
+ + RDTSCP + General Support + + Copy the current 64-bit value of the processor's time-stamp counter into "dst", and store the IA32_TSC_AUX MSR (signature value) into memory at "mem_addr". + +dst[63:0] := TimeStampCounter +MEM[mem_addr+31:mem_addr] := IA32_TSC_AUX[31:0] + + +
immintrin.h
+
+ + RDPID + General Support + + Copy the IA32_TSC_AUX MSR (signature value) into "dst". + +dst[31:0] := IA32_TSC_AUX[31:0] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + Set "dst" to the index of the lowest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined. + +tmp := 0 +IF a = 0 + dst := undefined +ELSE + DO WHILE ((tmp < 32) AND a[tmp] = 0) + tmp := tmp + 1 + dst := tmp + OD +FI + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + Set "dst" to the index of the highest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined. + +tmp := 31 +IF a = 0 + dst := undefined +ELSE + DO WHILE ((tmp > 0) AND a[tmp] = 0) + tmp := tmp - 1 + dst := tmp + OD +FI + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Set "index" to the index of the lowest set bit in 32-bit integer "mask". If no bits are set in "mask", then set "dst" to 0, otherwise set "dst" to 1. + +tmp := 0 +IF mask = 0 + dst := 0 +ELSE + DO WHILE ((tmp < 32) AND mask[tmp] = 0) + tmp := tmp + 1 + index := tmp + dst := 1 + OD +FI + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Set "index" to the index of the highest set bit in 32-bit integer "mask". If no bits are set in "mask", then set "dst" to 0, otherwise set "dst" to 1. + +tmp := 31 +IF mask = 0 + dst := 0 +ELSE + DO WHILE ((tmp > 0) AND mask[tmp] = 0) + tmp := tmp - 1 + index := tmp + dst := 1 + OD +FI + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Set "index" to the index of the lowest set bit in 64-bit integer "mask". If no bits are set in "mask", then set "dst" to 0, otherwise set "dst" to 1. + +tmp := 0 +IF mask = 0 + dst := 0 +ELSE + DO WHILE ((tmp < 64) AND mask[tmp] = 0) + tmp := tmp + 1 + index := tmp + dst := 1 + OD +FI + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Set "index" to the index of the highest set bit in 64-bit integer "mask". If no bits are set in "mask", then set "dst" to 0, otherwise set "dst" to 1. + +tmp := 63 +IF mask = 0 + dst := 0 +ELSE + DO WHILE ((tmp > 0) AND mask[tmp] = 0) + tmp := tmp - 1 + index := tmp + dst := 1 + OD +FI + + +&#xD;
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 32-bit integer "a". + +dst := a[b] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to its complement. + +dst := a[b] +a[b] := ~a[b] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to zero. + +dst := a[b] +a[b] := 0 + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to one. + +dst := a[b] +a[b] := 1 + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 64-bit integer "a". + +dst := a[b] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to its complement. + +dst := a[b] +a[b] := ~a[b] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to zero. + +dst := a[b] +a[b] := 0 + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to one. + +dst := a[b] +a[b] := 1 + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + Reverse the byte order of 32-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values. + +dst[7:0] := a[31:24] +dst[15:8] := a[23:16] +dst[23:16] := a[15:8] +dst[31:24] := a[7:0] + + +
immintrin.h
+
+ + Integer + Bit Manipulation + + Reverse the byte order of 64-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values. + +dst[7:0] := a[63:56] +dst[15:8] := a[55:48] +dst[23:16] := a[47:40] +dst[31:24] := a[39:32] +dst[39:32] := a[31:24] +dst[47:40] := a[23:16] +dst[55:48] := a[15:8] +dst[63:56] := a[7:0] + + +
immintrin.h
+
+ + Floating Point + Integer + Cast + + Cast from type float to type unsigned __int32 without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + Cast + + Cast from type double to type unsigned __int64 without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + Cast + + Cast from type unsigned __int32 to type float without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + Cast + + Cast from type unsigned __int64 to type double without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + FXSR + OS-Targeted + + Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary. + +(x87 FPU, MMX, XMM7-XMM0, MXCSR) := Load(MEM[mem_addr]) + + +
immintrin.h
+
+ + FXSR + OS-Targeted + + Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE64 instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary. + +(x87 FPU, MMX, XMM7-XMM0, MXCSR) := Load(MEM[mem_addr]) + + +
immintrin.h
+
+ + FXSR + OS-Targeted + + Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor. + +MEM[mem_addr+511*8:mem_addr] := Fxsave(x87 FPU, MMX, XMM7-XMM0, MXCSR) + + +&#xD;
immintrin.h
+
+ + FXSR + OS-Targeted + + Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor. + +MEM[mem_addr+511*8:mem_addr] := Fxsave64(x87 FPU, MMX, XMM7-XMM0, MXCSR) + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 64-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 63 +DO WHILE (count > 0) + tmp[0] := dst[63] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 64-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 63 +DO WHILE (count > 0) + tmp[63] := dst[0] + dst := (dst >> 1) OR tmp[63] + count := count - 1 +OD + + +
immintrin.h
+
+ + + + + General Support + + Treat the processor-specific feature(s) specified in "a" as available. Multiple features may be OR'd together. See the valid feature flags below: + +_FEATURE_GENERIC_IA32 +_FEATURE_FPU +_FEATURE_CMOV +_FEATURE_MMX +_FEATURE_FXSAVE +_FEATURE_SSE +_FEATURE_SSE2 +_FEATURE_SSE3 +_FEATURE_SSSE3 +_FEATURE_SSE4_1 +_FEATURE_SSE4_2 +_FEATURE_MOVBE +_FEATURE_POPCNT +_FEATURE_PCLMULQDQ +_FEATURE_AES +_FEATURE_F16C +_FEATURE_AVX +_FEATURE_RDRND +_FEATURE_FMA +_FEATURE_BMI +_FEATURE_LZCNT +_FEATURE_HLE +_FEATURE_RTM +_FEATURE_AVX2 +_FEATURE_KNCNI +_FEATURE_AVX512F +_FEATURE_ADX +_FEATURE_RDSEED +_FEATURE_AVX512ER +_FEATURE_AVX512PF +_FEATURE_AVX512CD +_FEATURE_SHA +_FEATURE_MPX + +
immintrin.h
+
+ + General Support + + Dynamically query the processor to determine if the processor-specific feature(s) specified in "a" are available, and return true or false (1 or 0) if the set of features is available. Multiple features may be OR'd together. This intrinsic does not check the processor vendor. See the valid feature flags below: + +_FEATURE_GENERIC_IA32 +_FEATURE_FPU +_FEATURE_CMOV +_FEATURE_MMX +_FEATURE_FXSAVE +_FEATURE_SSE +_FEATURE_SSE2 +_FEATURE_SSE3 +_FEATURE_SSSE3 +_FEATURE_SSE4_1 +_FEATURE_SSE4_2 +_FEATURE_MOVBE +_FEATURE_POPCNT +_FEATURE_PCLMULQDQ +_FEATURE_AES +_FEATURE_F16C +_FEATURE_AVX +_FEATURE_RDRND +_FEATURE_FMA +_FEATURE_BMI +_FEATURE_LZCNT +_FEATURE_HLE +_FEATURE_RTM +_FEATURE_AVX2 +_FEATURE_KNCNI +_FEATURE_AVX512F +_FEATURE_ADX +_FEATURE_RDSEED +_FEATURE_AVX512ER +_FEATURE_AVX512PF +_FEATURE_AVX512CD +_FEATURE_SHA +_FEATURE_MPX + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ACOS(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ACOS(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ACOSH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ACOSH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ASIN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ASIN(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ASINH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ASINH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ATANH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ATANH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := CubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := CDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the natural logarithm of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ln(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := COSD(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := COSD(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := COSH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := COSH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the square root of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 1 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 1 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Arithmetic + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ERF(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ERF(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Probability/Statistics + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := 10^(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := 10^(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := 2^(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := 2^(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]^2 + b[i+63:i]^2) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]^2 + b[i+31:i]^2) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := InvCubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := InvCubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := InvSQRT(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := InvSQRT(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ln(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ln(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := log10(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := log10(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ln(1.0 + a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ln(1.0 + a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := log2(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := log2(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i])^(b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i])^(b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 1 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 1 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) + MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + + Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SIND(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SIND(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SINH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SINH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_pd". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Elementary Math Functions + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := TAN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := TAN(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := TAND(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := TAND(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := TANH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Trigonometry + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := TANH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Miscellaneous + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := TRUNCATE(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + SSE + Miscellaneous + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := TRUNCATE(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Integer + SSE + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ACOS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ACOS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ACOSH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ACOSH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ASIN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ASIN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ASINH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ASINH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATANH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATANH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the natural logarithm of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ln(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COSD(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COSD(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COSH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COSH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the square root of packed complex single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Probability/Statistics + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 10^(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 10^(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 2^(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 2^(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the length of the hypotenous of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]^2 + b[i+63:i]^2) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the length of the hypotenous of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]^2 + b[i+31:i]^2) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InvCubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InvCubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InvSQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InvSQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ln(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ln(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := log10(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := log10(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ln(1.0 + a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ln(1.0 + a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := log2(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := log2(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i])^(b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i])^(b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) + MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + + Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SINH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SINH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_pd". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Elementary Math Functions + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TAN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TAN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TAND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TAND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TANH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Trigonometry + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TANH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Miscellaneous + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TRUNCATE(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + AVX + Miscellaneous + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TRUNCATE(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + AVX + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + POPCNT + Bit Manipulation + + + Count the number of bits set to 1 in 32-bit integer "a", and return that count in "dst". + + +dst := 0 +FOR i := 0 to 31 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + POPCNT + Bit Manipulation + + + Count the number of bits set to 1 in 64-bit integer "a", and return that count in "dst". + + +dst := 0 +FOR i := 0 to 63 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + +
immintrin.h
+
+ + General Support + + Read the Performance Monitor Counter (PMC) specified by "a", and store up to 64-bits in "dst". The width of performance counters is implementation specific. + +dst[63:0] := ReadPMC(a) + + +
immintrin.h
+
+ + TSC + General Support + + Copy the current 64-bit value of the processor's time-stamp counter into "dst". + +dst[63:0] := TimeStampCounter + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 32-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 31 +DO WHILE (count > 0) + tmp[0] := dst[31] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 32-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 31 +DO WHILE (count > 0) + tmp[31] := dst[0] + dst := (dst >> 1) OR tmp[31] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 16-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 15 +DO WHILE (count > 0) + tmp[0] := dst[15] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 16-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 15 +DO WHILE (count > 0) + tmp[15] := dst[0] + dst := (dst >> 1) OR tmp[15] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 64-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 63 +DO WHILE (count > 0) + tmp[0] := dst[63] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+
+ + Integer + Shift + + + Shift the bits of unsigned 64-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift BITWISE AND 63 +DO WHILE (count > 0) + tmp[63] := dst[0] + dst := (dst >> 1) OR tmp[63] + count := count - 1 +OD + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + Copy up to 64-bits from the value of the extended control register (XCR) specified by "a" into "dst". Currently only XFEATURE_ENABLED_MASK XCR is supported. + +dst[63:0] := XCR[a] + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + +st_mask = mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] + CASE (i) OF + 0: ProcessorState[x87 FPU] := mem_addr.FPUSSESave_Area[FPU] + 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] + DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] + ESAC + ELSE + // ProcessorExtendedState := Processor Supplied Values + CASE (i) OF + 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] + ESAC + FI + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + +st_mask = mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] + CASE (i) OF + 0: ProcessorState[x87 FPU] := mem_addr.FPUSSESave_Area[FPU] + 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] + DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] + ESAC + ELSE + // ProcessorExtendedState := Processor Supplied Values + CASE (i) OF + 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] + ESAC + FI + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSAVEOPT + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE instruction. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSAVEOPT + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE64 instruction. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + OS-Targeted + + + Copy 64-bits from "val" to the extended control register (XCR) specified by "a". Currently only XFEATURE_ENABLED_MASK XCR is supported. + +XCR[a] := val[63:0] + + +
immintrin.h
+
+ + + + + Integer + SSE2 + Load + + Load unaligned 32-bit integer from memory into the first element of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + SSE2 + Store + + + Store 32-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+31:mem_addr] := a[31:0] + + +
immintrin.h
+
+ + Integer + SSE + Store + + + Store 16-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+15:mem_addr] := a[15:0] + +
immintrin.h
+
+ + Integer + SSE + Load + + Load unaligned 64-bit integer from memory into the first element of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + SSE + Store + + + Store 64-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + +
immintrin.h
+
+ + Integer + SSE + Load + + Load unaligned 16-bit integer from memory into the first element of "dst". + +dst[15:0] := MEM[mem_addr+15:mem_addr] +dst[MAX:16] := 0 + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + Read the FS segment base register and store the 32-bit result in "dst". + +dst[31:0] := FS_Segment_Base_Register; +dst[63:32] := 0 + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + Read the FS segment base register and store the 64-bit result in "dst". + +dst[63:0] := FS_Segment_Base_Register; + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + Read the GS segment base register and store the 32-bit result in "dst". + +dst[31:0] := GS_Segment_Base_Register; +dst[63:32] := 0 + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + Read the GS segment base register and store the 64-bit result in "dst". + +dst[63:0] := GS_Segment_Base_Register; + + +
immintrin.h
+
+ + Integer + RDRAND + Random + + Read a hardware generated 16-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + + +IF HW_RND_GEN.ready = 1 + val[15:0] := HW_RND_GEN.data; + RETURN 1; +ELSE + val[15:0] := 0; + RETURN 0; +FI + + +
immintrin.h
+
+ + Integer + RDRAND + Random + + Read a hardware generated 32-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + + +IF HW_RND_GEN.ready = 1 + val[31:0] := HW_RND_GEN.data; + RETURN 1; +ELSE + val[31:0] := 0; + RETURN 0; +FI + + +
immintrin.h
+
+ + Integer + RDRAND + Random + + Read a hardware generated 64-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + + +IF HW_RND_GEN.ready = 1 + val[63:0] := HW_RND_GEN.data; + RETURN 1; +ELSE + val[63:0] := 0; + RETURN 0; +FI + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + + Write the unsigned 32-bit integer "a" to the FS segment base register. + +FS_Segment_Base_Register[31:0] := a[31:0]; +FS_Segment_Base_Register[63:32] := 0 + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + + Write the unsigned 64-bit integer "a" to the FS segment base register. + +FS_Segment_Base_Register[63:0] := a[63:0]; + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + + Write the unsigned 32-bit integer "a" to the GS segment base register. + +GS_Segment_Base_Register[31:0] := a[31:0]; +GS_Segment_Base_Register[63:32] := 0 + + +
immintrin.h
+
+ + Integer + FSGSBASE + General Support + + Write the unsigned 64-bit integer "a" to the GS segment base register. + +GS_Segment_Base_Register[63:0] := a[63:0]; + + +
immintrin.h
+
+ + Floating Point + FP16C + Convert + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + FP16C + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + RDSEED + Random + + Read a 16-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + +IF HW_NRND_GEN.ready = 1 THEN + val[15:0] := HW_NRND_GEN.data + RETURN 1 +ELSE + val[15:0] := 0 + RETURN 0 +FI + + +
immintrin.h
+
+ + RDSEED + Random + + Read a 32-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + +IF HW_NRND_GEN.ready = 1 THEN + val[31:0] := HW_NRND_GEN.data + RETURN 1 +ELSE + val[31:0] := 0 + RETURN 0 +FI + + +
immintrin.h
+
+ + RDSEED + Random + + Read a 64-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + +IF HW_NRND_GEN.ready = 1 THEN + val[63:0] := HW_NRND_GEN.data + RETURN 1 +ELSE + val[63:0] := 0 + RETURN 0 +FI + + +
immintrin.h
+
+ + Integer + Arithmetic + + + + + Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[31:0] := a[31:0] + b[31:0] + c_in; + + +
immintrin.h
+
+ + Integer + Arithmetic + + + + + Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[63:0] := a[63:0] + b[63:0] + c_in; + + +
immintrin.h
+
+ + Integer + Arithmetic + + + + + Add unsigned 8-bit borrow "b_in" (carry flag) to unsigned 32-bit integer "b", and subtract the result from unsigned 32-bit integer "a". Store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[31:0] := (a[31:0] - (b[31:0] + b_in)); + + +
immintrin.h
+
+ + Integer + Arithmetic + + + + + Add unsigned 8-bit borrow "b_in" (carry flag) to unsigned 64-bit integer "b", and subtract the result from unsigned 64-bit integer "a". Store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[63:0] := (a[63:0] - (b[63:0] + b_in)); + + +
immintrin.h
+
+ + Integer + ADX + Arithmetic + + + + + Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[31:0] := a[31:0] + b[31:0] + c_in; + + + +
immintrin.h
+
+ + Integer + ADX + Arithmetic + + + + + Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +dst:out[63:0] := a[63:0] + b[63:0] + c_in; + + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + Perform an intermediate calculation for the next four SHA1 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst". + +W0 := a[127:96]; +W1 := a[95:64]; +W2 := a[63:32]; +W3 := a[31:0]; +W4 := b[127:96]; +W5 := b[95:64]; + +dst[127:96] := W2 XOR W0; +dst[95:64] := W3 XOR W1; +dst[63:32] := W4 XOR W2; +dst[31:0] := W5 XOR W3; + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + Perform the final calculation for the next four SHA1 message values (unsigned 32-bit integers) using the intermediate result in "a" and the previous message values in "b", and store the result in "dst". + +W13 := b[95:64]; +W14 := b[63:32]; +W15 := b[31:0]; +W16 := (a[127:96] XOR W13) <<< 1; +W17 := (a[95:64] XOR W14) <<< 1; +W18 := (a[63:32] XOR W15) <<< 1; +W19 := (a[31:0] XOR W16) <<< 1; + +dst[127:96] := W16; +dst[95:64] := W17; +dst[63:32] := W18; +dst[31:0] := W19; + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + Calculate SHA1 state variable E after four rounds of operation from the current SHA1 state variable "a", add that value to the scheduled values (unsigned 32-bit integers) in "b", and store the result in "dst". + +tmp := (a[127:96] <<< 30); +dst[127:96] := b[127:96] + tmp; +dst[95:64] := b[95:64]; +dst[63:32] := b[63:32]; +dst[31:0] := b[31:0]; + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + + Perform four rounds of SHA1 operation using an initial SHA1 state (A,B,C,D) from "a" and some pre-computed sum of the next 4 round message values (unsigned 32-bit integers), and state variable E from "b", and store the updated SHA1 state (A,B,C,D) in "dst". "func" contains the logic functions and round constants. + +IF (func[1:0] = 0) THEN + f() := f0(), K := K0; +ELSE IF (func[1:0] = 1) THEN + f() := f1(), K := K1; +ELSE IF (func[1:0] = 2) THEN + f() := f2(), K := K2; +ELSE IF (func[1:0] = 3) THEN + f() := f3(), K := K3; +FI; + +A := a[127:96]; +B := a[95:64]; +C := a[63:32]; +D := a[31:0]; + +W[0] := b[127:96]; +W[1] := b[95:64]; +W[2] := b[63:32]; +W[3] := b[31:0]; + +A[1] := f(B, C, D) + (A <<< 5) + W[0] + K; +B[1] := A; +C[1] := B <<< 30; +D[1] := C; +E[1] := D; + +FOR i = 1 to 3 + A[i+1] := f(B[i], C[i], D[i]) + (A[i] <<< 5) + W[i] + E[i] + K; + B[i+1] := A[i]; + C[i+1] := B[i] <<< 30; + D[i+1] := C[i]; + E[i+1] := D[i]; +ENDFOR; + +dst[127:96] := A[4]; +dst[95:64] := B[4]; +dst[63:32] := C[4]; +dst[31:0] := D[4]; + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + Perform an intermediate calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst". + +W4 := b[31:0]; +W3 := a[127:96]; +W2 := a[95:64]; +W1 := a[63:32]; +W0 := a[31:0]; + +dst[127:96] := W3 + sigma0(W4); +dst[95:64] := W2 + sigma0(W3); +dst[63:32] := W1 + sigma0(W2); +dst[31:0] := W0 + sigma0(W1); + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + Perform the final calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst"." + +W14 := b[95:64]; +W15 := b[127:96]; +W16 := a[31:0] + sigma1(W14); +W17 := a[63:32] + sigma1(W15); +W18 := a[95:64] + sigma1(W16); +W19 := a[127:96] + sigma1(W17); + +dst[127:96] := W19; +dst[95:64] := W18; +dst[63:32] := W17; +dst[31:0] := W16; + + +
immintrin.h
+
+ + Integer + SHA + Cryptography + + + + Perform 2 rounds of SHA256 operation using an initial SHA256 state (C,D,G,H) from "a", an initial SHA256 state (A,B,E,F) from "b", and a pre-computed sum of the next 2 round message values (unsigned 32-bit integers) and the corresponding round constants from "k", and store the updated SHA256 state (A,B,E,F) in "dst". + +A[0] := b[127:96]; +B[0] := b[95:64]; +C[0] := a[127:96]; +D[0] := a[95:64]; +E[0] := b[63:32]; +F[0] := b[31:0]; +G[0] := a[63:32]; +H[0] := a[31:0]; + +W_K0 := k[31:0]; +W_K1 := k[63:32]; + +FOR i = 0 to 1 + A_(i+1) := Ch(E[i], F[i], G[i]) + sum1(E[i]) + WKi + H[i] + Maj(A[i], B[i], C[i]) + sum0(A[i]); + B_(i+1) := A[i]; + C_(i+1) := B[i]; + D_(i+1) := C[i]; + E_(i+1) := Ch(E[i], F[i], G[i]) + sum1(E[i]) + WKi + H[i] + D[i]; + F_(i+1) := E[i]; + G_(i+1) := F[i]; + H_(i+1) := G[i]; +ENDFOR; + +dst[127:96] := A[2]; +dst[95:64] := B[2]; +dst[63:32] := E[2]; +dst[31:0] := F[2]; + + +
immintrin.h
+
+ + MPX + Miscellaneous + + + Make a pointer with the value of "srcmem" and bounds set to ["srcmem", "srcmem" + "size" - 1], and store the result in "dst". + +dst := srcmem; +dst.LB := srcmem.LB; +dst.UB := srcmem + size - 1; + + +
immintrin.h
+
+ + MPX + Miscellaneous + + + + Narrow the bounds for pointer "q" to the intersection of the bounds of "r" and the bounds ["q", "q" + "size" - 1], and store the result in "dst". + +dst := q; +IF r.LB > (q + size - 1) OR r.UB < q THEN + dst.LB := 1; + dst.UB := 0; +ELSE + dst.LB := MAX(r.LB, q); + dst.UB := MIN(r.UB, (q + size - 1)); +FI; + +
immintrin.h
+
+ + MPX + Miscellaneous + + + Make a pointer with the value of "q" and bounds set to the bounds of "r" (e.g. copy the bounds of "r" to pointer "q"), and store the result in "dst". + +dst := q; +dst.LB := r.LB; +dst.UB := r.UB; + +
immintrin.h
+
+ + MPX + Miscellaneous + + Make a pointer with the value of "q" and open bounds, which allow the pointer to access the entire virtual address space, and store the result in "dst". + +dst := q; +dst.LB := 0; +dst.UB := 0; + +
immintrin.h
+
+ + MPX + Miscellaneous + + + Stores the bounds of "ptr_val" pointer in memory at address "ptr_addr". + +MEM[ptr_addr].LB := ptr_val.LB; +MEM[ptr_addr].UB := ptr_val.UB; + + +
immintrin.h
+
+ + MPX + Miscellaneous + + Checks if "q" is within its lower bound, and throws a #BR if not. + +IF q < q.LB THEN + #BR; +FI; + + +
immintrin.h
+
+ + MPX + Miscellaneous + + Checks if "q" is within its upper bound, and throws a #BR if not. + +IF q > q.UB THEN + #BR; +FI; + + + +
immintrin.h
+
+ + MPX + Miscellaneous + + + Checks if ["q", "q" + "size" - 1] is within the lower and upper bounds of "q" and throws a #BR if not. + +IF (q + size - 1) < q.LB OR (q + size - 1) > q.UB THEN + #BR; +FI; + + + +
immintrin.h
+
+ + MPX + Miscellaneous + + Return the lower bound of "q". + +dst := q.LB + +
immintrin.h
+
+ + MPX + Miscellaneous + + Return the upper bound of "q". + +dst := q.UB + +
immintrin.h
+
+ + Load + + Loads a big-endian word (16-bit) value from address "ptr" and stores the result in "dst". + + +addr := MEM[ptr] +FOR j := 0 to 1 + i := j*8 + dst[i+7:i] := addr[15-i:15-i-7] +ENDFOR + +
immintrin.h
+
+ + Load + + Loads a big-endian double word (32-bit) value from address "ptr" and stores the result in "dst". + + +addr := MEM[ptr] +FOR j := 0 to 4 + i := j*8 + dst[i+7:i] := addr[31-i:31-i-7] +ENDFOR + +
immintrin.h
+
+ + Load + + Loads a big-endian quad word (64-bit) value from address "ptr" and stores the result in "dst". + + +addr := MEM[ptr] +FOR j := 0 to 8 + i := j*8 + dst[i+7:i] := addr[63-i:63-i-7] +ENDFOR + +
immintrin.h
+
+ + Store + + + Stores word-sized (16-bit) "data" to address "ptr" in big-endian format. + + +addr := MEM[ptr] +FOR j := 0 to 1 + i := j*8 + addr[i+7:i] := data[15-i:15-i-7] +ENDFOR + +
immintrin.h
+
+ + Store + + + Stores double word-sized (32-bit) "data" to address "ptr" in big-endian format. + + +addr := MEM[ptr] +FOR j := 0 to 4 + i := j*8 + addr[i+7:i] := data[31-i:31-i-7] +ENDFOR + +
immintrin.h
+
+ + Store + + + Stores quad word-sized (64-bit) "data" to address "ptr" in big-endian format. + + +addr := MEM[ptr] +FOR j := 0 to 7 + i := j*8 + addr[i+7:i] := data[63-i:63-i-7] +ENDFOR + +
immintrin.h
+
+ + XSAVE + XSAVEC + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSS + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSAVEC + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSS + OS-Targeted + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + +mask[62:0] := save_mask[62:0] BITWISE AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87 FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSS + OS-Targeted + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + +st_mask = mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] + CASE (i) OF + 0: ProcessorState[x87 FPU] := mem_addr.FPUSSESave_Area[FPU] + 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] + DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] + ESAC + ELSE + // ProcessorExtendedState := Processor Supplied Values + CASE (i) OF + 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] + ESAC + FI + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + XSAVE + XSS + OS-Targeted + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + +st_mask = mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] + CASE (i) OF + 0: ProcessorState[x87 FPU] := mem_addr.FPUSSESave_Area[FPU] + 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] + DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] + ESAC + ELSE + // ProcessorExtendedState := Processor Supplied Values + CASE (i) OF + 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] + ESAC + FI + FI + i := i + 1 +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX + Convert + + Copy the lower single-precision (32-bit) floating-point element of "a" to "dst". + dst[31:0] := a[31:0] + +
immintrin.h
+
+ + Floating Point + AVX2 + Convert + + Copy the lower double-precision (64-bit) floating-point element of "a" to "dst". + dst[63:0] := a[63:0] + +
immintrin.h
+
+ + Integer + AVX2 + Convert + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + +
immintrin.h
+
+ + + + Mask + AVX512F + Mask + + + Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k". + +k[15:0] := (NOT a[15:0]) AND b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k". + +k[15:0] := (NOT a[15:0]) AND b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] AND b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] AND b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + Copy 16-bit mask "a" to "k". + +k[15:0] := a[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + Copy 16-bit mask "a" to "k". + +k[15:0] := a[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". + +k[15:0] := NOT a[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". + +k[15:0] := NOT a[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] OR b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] OR b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Unpack and interleave 8 bits from masks "a" and "b", and store the 16-bit result in "k". + +k[7:0] := b[7:0] +k[15:8] := a[7:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := NOT (a[15:0] XOR b[15:0]) +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := NOT (a[15:0] XOR b[15:0]) +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] XOR b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] XOR b[15:0] +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := a[63:0] + b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := a[31:0] + b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Miscellaneous + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 64 bytes (16 elements) in "dst". + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*count) +dst[511:0] := temp[511:0] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Miscellaneous + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 64 bytes (16 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*count) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Miscellaneous + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 32-bit elements, and stores the low 64 bytes (16 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*count) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Miscellaneous + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 64 bytes (8 elements) in "dst". + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*count) +dst[511:0] := temp[511:0] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Miscellaneous + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 64 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*count) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Miscellaneous + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "count" 64-bit elements, and stores the low 64 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*count) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Swizzle + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Swizzle + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[n+63:n] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[n+31:n] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[n+63:n] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] <= b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] < b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := !(a[i+63:i] <= b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := !(a[i+63:i] < b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] <= b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] < b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := !(a[i+63:i] <= b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := !(a[i+63:i] < b[i+63:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] <= b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] < b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := !(a[i+31:i] <= b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := !(a[i+31:i] < b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] != NaN AND b[i+31:i] != NaN) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := (a[i+31:i] == NaN OR b[i+31:i] == NaN) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + y +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] <= b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] < b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := !(a[i+31:i] <= b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := !(a[i+31:i] < b[i+31:i]) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] != NaN AND b[i+31:i] != NaN) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F/KNCNI + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := (a[i+31:i] == NaN OR b[i+31:i] == NaN) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 + +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 + +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +IF k1[0] + k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +IF k1[0] + k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 + +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 + +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +IF k1[0] + k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512F + Compare + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +IF k1[0] + k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Compare + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +RETURN ( a[63:0] OP b[63:0] ) ? 1 : 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Compare + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC + +RETURN ( a[31:0] OP b[31:0] ) ? 1 : 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + Swizzle + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + Swizzle + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + m := j*64 + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := src[m+63:m] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 15 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16FP(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_UnsignedInt32(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_UnsignedInt64(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_UnsignedInt32(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_UnsignedInt64(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert the 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert the 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_UnsignedInt32(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_UnsignedInt64(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_UnsignedInt32(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_UnsignedInt64(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_IntegerTruncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_IntegerTruncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_IntegerTruncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedIntegerTruncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedIntegerTruncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedIntegerTruncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_IntegerTruncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_IntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_IntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedIntegerTruncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedIntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedIntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP64_To_UnsignedInt32_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP64_To_UnsignedInt64_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_UnsignedInt32_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_UnsignedInt64_Truncate(a[63:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[31:0] := Convert_FP32_To_UnsignedInt32_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + [round_note] + + +dst[63:0] := Convert_FP32_To_UnsignedInt64_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_UnsignedInt32_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_UnsignedInt64_Truncate(a[31:0]) + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := ConvertUnsignedInt32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := Convert_UnsignedInt64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the unsigned 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_UnsignedInt32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_UnsignedInt64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_UnsignedInt32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + + Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := Convert_UnsignedInt64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_UnsignedInt32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F + Convert + + + Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_UnsignedInt64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := a[63:0] / b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := a[31:0] / b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + Swizzle + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + Swizzle + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + Swizzle + + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + Swizzle + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN L= 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +IF k[0] + dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +IF k[0] + dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +IF k[0] + dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +IF k[0] + dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + [round_note] + + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", subtract the lower element in "c" from the negated intermediate result, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", subtract the lower element in "c" from the negated intermediate result, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:16] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + m := j*64 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + m := j*64 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +dst[63:0] := ConvertExpFP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +dst[63:0] := ConvertExpFP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +dst[31:0] := ConvertExpFP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +dst[31:0] := ConvertExpFP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [round_note] + + +IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + +IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Miscellaneous + + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +IF k[0] + dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +IF k[0] + dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +IF k[0] + dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +IF k[0] + dst[63:0] := GetNormalizedMantissa(a[63:0], sc, interv) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +IF k[0] + dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +IF k[0] + dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][round_note] + +IF k[0] + dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +IF k[0] + dst[31:0] := GetNormalizedMantissa(a[31:0], sc, interv) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Copy "a" to "dst", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[0]) of +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[0]) of +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[0]) of +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Copy "a" to "dst", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[0]) of +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[0]) of +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[0]) of +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +dst[63:0] := MAX(a[63:0], b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +dst[31:0] := MAX(a[31:0], b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +dst[63:0] := MIN(a[63:0], b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +dst[31:0] := MIN(a[31:0], b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +tmp[319:256] := a[319:256] +tmp[383:320] := a[319:256] +tmp[447:384] := a[447:384] +tmp[511:448] := a[447:384] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +tmp[319:256] := a[319:256] +tmp[383:320] := a[319:256] +tmp[447:384] := a[447:384] +tmp[511:448] := a[447:384] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst". + + +dst[63:0] := a[63:0] +dst[127:64] := a[63:0] +dst[191:128] := a[191:128] +dst[255:192] := a[191:128] +dst[319:256] := a[319:256] +dst[383:320] := a[319:256] +dst[447:384] := a[447:384] +dst[511:448] := a[447:384] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + Load 512-bits (composed of 16 packed 32-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + Load 512-bits of integer data from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Move + + + + Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Move + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + Store 512-bits (composed of 16 packed 32-bit integers) from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + Store 512-bits of integer data from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + Load 512-bits (composed of 8 packed 64-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Move + + + + Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Move + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + Store 512-bits (composed of 8 packed 64-bit integers) from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + Load 512-bits of integer data from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + Store 512-bits of integer data from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + Load 512-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + Store 512-bits of integer data from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[63:0] := MEM[mem_addr+63:mem_addr] +ELSE + dst[63:0] := src[63:0] +FI +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + + Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + Store the lower double-precision (64-bit) floating-point element from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + MEM[mem_addr+63:mem_addr] := a[63:0] +FI + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[63:0] := MEM[mem_addr+63:mem_addr] +ELSE + dst[63:0] := 0 +FI +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +tmp[287:256] := a[319:288] +tmp[319:288] := a[319:288] +tmp[351:320] := a[383:352] +tmp[383:352] := a[383:352] +tmp[415:384] := a[447:416] +tmp[447:416] := a[447:416] +tmp[479:448] := a[511:480] +tmp[511:480] := a[511:480] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +tmp[287:256] := a[319:288] +tmp[319:288] := a[319:288] +tmp[351:320] := a[383:352] +tmp[383:352] := a[383:352] +tmp[415:384] := a[447:416] +tmp[447:416] := a[447:416] +tmp[479:448] := a[511:480] +tmp[511:480] := a[511:480] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] +dst[159:128] := a[191:160] +dst[191:160] := a[191:160] +dst[223:192] := a[255:224] +dst[255:224] := a[255:224] +dst[287:256] := a[319:288] +dst[319:288] := a[319:288] +dst[351:320] := a[383:352] +dst[383:352] := a[383:352] +dst[415:384] := a[447:416] +dst[447:416] := a[447:416] +dst[479:448] := a[511:480] +dst[511:480] := a[511:480] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +tmp[287:256] := a[287:256] +tmp[319:288] := a[287:256] +tmp[351:320] := a[351:320] +tmp[383:352] := a[351:320] +tmp[415:384] := a[415:384] +tmp[447:416] := a[415:384] +tmp[479:448] := a[479:448] +tmp[511:480] := a[479:448] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +tmp[287:256] := a[287:256] +tmp[319:288] := a[287:256] +tmp[351:320] := a[351:320] +tmp[383:352] := a[351:320] +tmp[415:384] := a[415:384] +tmp[447:416] := a[415:384] +tmp[479:448] := a[479:448] +tmp[511:480] := a[479:448] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] +dst[159:128] := a[159:128] +dst[191:160] := a[159:128] +dst[223:192] := a[223:192] +dst[255:224] := a[223:192] +dst[287:256] := a[287:256] +dst[319:288] := a[287:256] +dst[351:320] := a[351:320] +dst[383:352] := a[351:320] +dst[415:384] := a[415:384] +dst[447:416] := a[415:384] +dst[479:448] := a[479:448] +dst[511:480] := a[479:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[31:0] := MEM[mem_addr+31:mem_addr] +ELSE + dst[31:0] := src[31:0] +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + Store the lower single-precision (32-bit) floating-point element from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + MEM[mem_addr+31:mem_addr] := a[31:0] +FI + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[31:0] := MEM[mem_addr+31:mem_addr] +ELSE + dst[31:0] := 0 +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Move + + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). RM. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). RM. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] * b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] * b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] * b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] * b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := a[63:0] * b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] * b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] * b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] * b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] * b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := a[31:0] * b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ABS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ABS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] BITWISE AND b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise AND of 512 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[511:0] := (a[511:0] AND b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise NOT of 512 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[511:0] := ((NOT a[511:0]) AND b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise NOT of 512 bits (composed of packed 64-bit integers) in "a" and then AND with "b", and store the results in "dst". + +dst[511:0] := ((NOT a[511:0]) AND b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise AND of 512 bits (composed of packed 64-bit integers) in "a" and "b", and store the results in "dst". + +dst[511:0] := (a[511:0] AND b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + + Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + + Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + Broadcast 8-bit integer "a" to all elements of "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + Broadcast 32-bit integer "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Swizzle + + Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ZeroExtend(k[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Swizzle + + Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ZeroExtend(k[15:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + Broadcast 64-bit integer "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + + Broadcast the low packed 16-bit integer from "a" to all all elements of "dst". + + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + Swizzle + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + Swizzle + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + FOR k := 0 to j-1 + m := k*32 + dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + FOR k := 0 to j-1 + m := k*64 + dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Compare + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_mask_permutexvar_epi32", and it is recommended that you use that intrinsic name. + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + + Integer + AVX512F + Swizzle + + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_permutexvar_epi32", and it is recommended that you use that intrinsic name. + + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set) + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + off := idx[i+3:i]*32 + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + off := idx[i+2:i]*64 + dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128] +IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192] +IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128] +IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192] +IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256] +IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320] +IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256] +IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320] +IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384] +IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448] +IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384] +IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +IF (b[129] == 0) tmp_dst[191:128] := a[191:128] +IF (b[129] == 1) tmp_dst[191:128] := a[255:192] +IF (b[193] == 0) tmp_dst[255:192] := a[191:128] +IF (b[193] == 1) tmp_dst[255:192] := a[255:192] +IF (b[257] == 0) tmp_dst[319:256] := a[319:256] +IF (b[257] == 1) tmp_dst[319:256] := a[383:320] +IF (b[321] == 0) tmp_dst[383:320] := a[319:256] +IF (b[321] == 1) tmp_dst[383:320] := a[383:320] +IF (b[385] == 0) tmp_dst[447:384] := a[447:384] +IF (b[385] == 1) tmp_dst[447:384] := a[511:448] +IF (b[449] == 0) tmp_dst[511:448] := a[447:384] +IF (b[449] == 1) tmp_dst[511:448] := a[511:448] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128] +IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192] +IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128] +IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192] +IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256] +IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320] +IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256] +IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320] +IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384] +IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448] +IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384] +IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +IF (b[129] == 0) tmp_dst[191:128] := a[191:128] +IF (b[129] == 1) tmp_dst[191:128] := a[255:192] +IF (b[193] == 0) tmp_dst[255:192] := a[191:128] +IF (b[193] == 1) tmp_dst[255:192] := a[255:192] +IF (b[257] == 0) tmp_dst[319:256] := a[319:256] +IF (b[257] == 1) tmp_dst[319:256] := a[383:320] +IF (b[321] == 0) tmp_dst[383:320] := a[319:256] +IF (b[321] == 1) tmp_dst[383:320] := a[383:320] +IF (b[385] == 0) tmp_dst[447:384] := a[447:384] +IF (b[385] == 1) tmp_dst[447:384] := a[511:448] +IF (b[449] == 0) tmp_dst[511:448] := a[447:384] +IF (b[449] == 1) tmp_dst[511:448] := a[511:448] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +IF (imm8[0] == 0) dst[63:0] := a[63:0] +IF (imm8[0] == 1) dst[63:0] := a[127:64] +IF (imm8[1] == 0) dst[127:64] := a[63:0] +IF (imm8[1] == 1) dst[127:64] := a[127:64] +IF (imm8[2] == 0) dst[191:128] := a[191:128] +IF (imm8[2] == 1) dst[191:128] := a[255:192] +IF (imm8[3] == 0) dst[255:192] := a[191:128] +IF (imm8[3] == 1) dst[255:192] := a[255:192] +IF (imm8[4] == 0) dst[319:256] := a[319:256] +IF (imm8[4] == 1) dst[319:256] := a[383:320] +IF (imm8[5] == 0) dst[383:320] := a[319:256] +IF (imm8[5] == 1) dst[383:320] := a[383:320] +IF (imm8[6] == 0) dst[447:384] := a[447:384] +IF (imm8[6] == 1) dst[447:384] := a[511:448] +IF (imm8[7] == 0) dst[511:448] := a[447:384] +IF (imm8[7] == 1) dst[511:448] := a[511:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +IF (b[1] == 0) dst[63:0] := a[63:0] +IF (b[1] == 1) dst[63:0] := a[127:64] +IF (b[65] == 0) dst[127:64] := a[63:0] +IF (b[65] == 1) dst[127:64] := a[127:64] +IF (b[129] == 0) dst[191:128] := a[191:128] +IF (b[129] == 1) dst[191:128] := a[255:192] +IF (b[193] == 0) dst[255:192] := a[191:128] +IF (b[193] == 1) dst[255:192] := a[255:192] +IF (b[257] == 0) dst[319:256] := a[319:256] +IF (b[257] == 1) dst[319:256] := a[383:320] +IF (b[321] == 0) dst[383:320] := a[319:256] +IF (b[321] == 1) dst[383:320] := a[383:320] +IF (b[385] == 0) dst[447:384] := a[447:384] +IF (b[385] == 1) dst[447:384] := a[511:448] +IF (b[449] == 0) dst[511:448] := a[447:384] +IF (b[449] == 1) dst[511:448] := a[511:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) +tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) +tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) +tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) +tmp_dst[287:256] := SELECT4(a[383:256], b[257:256]) +tmp_dst[319:288] := SELECT4(a[383:256], b[289:288]) +tmp_dst[351:320] := SELECT4(a[383:256], b[321:320]) +tmp_dst[383:352] := SELECT4(a[383:256], b[353:352]) +tmp_dst[415:384] := SELECT4(a[511:384], b[385:384]) +tmp_dst[447:416] := SELECT4(a[511:384], b[417:416]) +tmp_dst[479:448] := SELECT4(a[511:384], b[449:448]) +tmp_dst[511:480] := SELECT4(a[511:384], b[481:480]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) +tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) +tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) +tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) +tmp_dst[287:256] := SELECT4(a[383:256], b[257:256]) +tmp_dst[319:288] := SELECT4(a[383:256], b[289:288]) +tmp_dst[351:320] := SELECT4(a[383:256], b[321:320]) +tmp_dst[383:352] := SELECT4(a[383:256], b[353:352]) +tmp_dst[415:384] := SELECT4(a[511:384], b[385:384]) +tmp_dst[447:416] := SELECT4(a[511:384], b[417:416]) +tmp_dst[479:448] := SELECT4(a[511:384], b[449:448]) +tmp_dst[511:480] := SELECT4(a[511:384], b[481:480]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], b[1:0]) +dst[63:32] := SELECT4(a[127:0], b[33:32]) +dst[95:64] := SELECT4(a[127:0], b[65:64]) +dst[127:96] := SELECT4(a[127:0], b[97:96]) +dst[159:128] := SELECT4(a[255:128], b[129:128]) +dst[191:160] := SELECT4(a[255:128], b[161:160]) +dst[223:192] := SELECT4(a[255:128], b[193:192]) +dst[255:224] := SELECT4(a[255:128], b[225:224]) +dst[287:256] := SELECT4(a[383:256], b[257:256]) +dst[319:288] := SELECT4(a[383:256], b[289:288]) +dst[351:320] := SELECT4(a[383:256], b[321:320]) +dst[383:352] := SELECT4(a[383:256], b[353:352]) +dst[415:384] := SELECT4(a[511:384], b[385:384]) +dst[447:416] := SELECT4(a[511:384], b[417:416]) +dst[479:448] := SELECT4(a[511:384], b[449:448]) +dst[511:480] := SELECT4(a[511:384], b[481:480]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + dst[i+63:i] := a[id+63:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + id := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ 
+ Floating Point
+ AVX512F
+ Swizzle
+ 
+ 
+ Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
+
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ 
+ 
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[319:256] := SELECT4(a[511:256], imm8[1:0]) +dst[383:320] := SELECT4(a[511:256], imm8[3:2]) +dst[447:384] := SELECT4(a[511:256], imm8[5:4]) +dst[511:448] := SELECT4(a[511:256], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + id := idx[i+2:i]*64 + dst[i+63:i] := a[id+63:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + Swizzle + + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + Swizzle + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + Swizzle + + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + Swizzle + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:16] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + m := j*32 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ 
+ Integer
+ AVX512F
+ Load
+ 
+ 
+ 
+ Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+ 
+
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale]
+ENDFOR
+dst[MAX:256] := 0
+ 
+ 
immintrin.h
+
+ 
+ Integer
+ AVX512F
+ Load
+ 
+ 
+ 
+ 
+ 
+ Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
+ 
+
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale]
+ k[j] := 0
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+k[MAX:8] := 0
+dst[MAX:256] := 0
+ 
+ 
immintrin.h
+
+ + Integer + AVX512F + Load + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*32 + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 15 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 15 + i := j*32 + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Special Math Functions + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[k+7:k] := Truncate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[k+15:k] := Truncate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[k+7:k] := Truncate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[k+31:k] := Truncate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Truncate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[k+15:k] := Truncate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[i+31:i] := SignExtend(a[k+7:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[i+63:i] := SignExtend(a[k+7:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[i+63:i] := SignExtend(a[k+31:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[i+31:i] := SignExtend(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + l := j*16 + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[i+63:i] := SignExtend(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[i+31:i] := ZeroExtend(a[k+7:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[i+63:i] := ZeroExtend(a[k+7:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[i+63:i] := ZeroExtend(a[k+31:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[i+31:i] := ZeroExtend(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[i+63:i] := ZeroExtend(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] OR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise OR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[511:0] := (a[511:0] OR b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the result in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] OR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Miscellaneous + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 15 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Swizzle + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +dst[351:320] := SELECT4(a[383:256], imm8[5:4]) +dst[383:352] := SELECT4(a[383:256], imm8[7:6]) +dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +dst[479:448] := SELECT4(a[511:384], imm8[5:4]) +dst[511:480] := SELECT4(a[511:384], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Shift + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 15 + i := j*32 + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 7 + i := j*64 + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Logical + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F/KNCNI + Logical + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512F + Logical + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise XOR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[511:0] := (a[511:0] XOR b[511:0]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Logical + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := APPROXIMATE(1.0/b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := APPROXIMATE(1.0/b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[63:0] := APPROXIMATE(1.0/b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := APPROXIMATE(1.0/b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := APPROXIMATE(1.0/b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[31:0] := APPROXIMATE(1.0/b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + [round_note] + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round the lower double-precision (64-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +dst[63:0] := RoundTo_IntegerPD(a[63:0], imm8[7:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Round the lower single-precision (32-bit) floating-point element in "a" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +dst[31:0] := RoundTo_IntegerPS(a[31:0], imm8[7:0]) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := APPROXIMATE(1.0 / SQRT(b[63:0])) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := APPROXIMATE(1.0 / SQRT(b[63:0])) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[63:0] := APPROXIMATE(1.0 / SQRT(b[63:0])) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := APPROXIMATE(1.0 / SQRT(b[31:0])) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := APPROXIMATE(1.0 / SQRT(b[31:0])) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[31:0] := APPROXIMATE(1.0 / SQRT(b[31:0])) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +dst[63:0] := SCALE(a[63:0], b[63:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +dst[63:0] := SCALE(a[63:0], b[63:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := SCALE(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := SCALE(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := SCALE(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +IF k[0] + dst[31:0] := SCALE(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +dst[31:0] := SCALE(a[31:0], b[31:0]) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Miscellaneous + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +dst[31:0] := SCALE(a[31:0], b[31:0]) +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 15 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +dst[511:384] := SELECT4(b[511:0], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +dst[511:384] := SELECT4(b[511:0], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +dst[511:384] := SELECT4(b[511:0], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Swizzle + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT4(a[511:0], imm8[1:0]) +dst[255:128] := SELECT4(a[511:0], imm8[3:2]) +dst[383:256] := SELECT4(b[511:0], imm8[5:4]) +dst[511:384] := SELECT4(b[511:0], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] +tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320] +tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320] +tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448] +tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] +tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320] +tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320] +tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448] +tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst". + +dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] +dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320] +dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320] +dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448] +dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +dst[351:320] := SELECT4(b[383:256], imm8[5:4]) +dst[383:352] := SELECT4(b[383:256], imm8[7:6]) +dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +dst[479:448] := SELECT4(b[511:384], imm8[5:4]) +dst[511:480] := SELECT4(b[511:384], imm8[7:6]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note]. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + [round_note]. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + [round_note]. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := SQRT(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := SQRT(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] - b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] - b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +IF k[0] + dst[63:0] := a[63:0] - b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] - b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + + +dst[63:0] := a[63:0] - b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] - b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] - b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +IF k[0] + dst[31:0] := a[31:0] - b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] - b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Arithmetic + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + + +dst[31:0] := a[31:0] - b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Swizzle + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m512d to type __m128d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m512 to type __m128. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m512d to type __m256d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Cast + + Cast vector of type __m512d to type __m512. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Cast + + Cast vector of type __m512d to type __m512i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m128 to type __m512; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m256 to type __m512; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Cast vector of type __m512 to type __m256. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Cast + + Cast vector of type __m512 to type __m512d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Cast + + Cast vector of type __m512 to type __m512i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Cast + + Cast vector of type __m512i to type __m512d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Cast + + Cast vector of type __m512i to type __m512. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Cast vector of type __m512i to type __m128i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Cast vector of type __m512i to type __m256i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Casts vector of type __m128d to type __m512d; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Casts vector of type __m128 to type __m512; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Casts vector of type __m128i to type __m512i; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Casts vector of type __m256d to type __m512d; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Floating Point + AVX512F + Cast + + Casts vector of type __m256 to type __m512; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX512F + Cast + + Casts vector of type __m256i to type __m512i; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Reduce the packed 32-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +sum[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + sum[31:0] := sum[31:0] + a[i+31:i] + FI +ENDFOR +RETURN sum[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Reduce the packed 64-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +sum[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + sum[63:0] := sum[63:0] + a[i+63:i] + FI +ENDFOR +RETURN sum[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +sum[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + sum[63:0] := sum[63:0] + a[i+63:i] + FI +ENDFOR +RETURN sum[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +sum[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + sum[31:0] := sum[31:0] + a[i+31:i] + FI +ENDFOR +RETURN sum[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Reduce the packed 32-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". + +reduced[31:0] := 0xFFFFFFFF +FOR j := 0 to 15 + i := j*32 + IF k[j] + reduced[31:0] := reduced[31:0] AND a[i+31:i] + FI +ENDFOR +RETURN reduced[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Reduce the packed 64-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". + +reduced[63:0] := 0xFFFFFFFFFFFFFFFF +FOR j := 0 to 7 + i := j*64 + IF k[j] + reduced[63:0] := reduced[63:0] AND a[i+63:i] + FI +ENDFOR +RETURN reduced[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[31:0] := MIN_INT +FOR j := 0 to 15 + i := j*32 + IF k[j] + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) + FI +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[63:0] := MIN_INT +FOR j := 0 to 7 + i := j*64 + IF k[j] + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) + FI +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed unsigned 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) + FI +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed unsigned 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) + FI +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[63:0] := MIN_DOUBLE +FOR j := 0 to 7 + i := j*64 + IF k[j] + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) + FI +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +max[31:0] := MIN_FLOAT +FOR j := 0 to 15 + i := j*32 + IF k[j] + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) + FI +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[31:0] := MAX_INT +FOR j := 0 to 15 + i := j*32 + IF k[j] + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) + FI +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[63:0] := MAX_INT +FOR j := 0 to 7 + i := j*64 + IF k[j] + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) + FI +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed unsigned 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[31:0] := MAX_UINT +FOR j := 0 to 15 + i := j*32 + IF k[j] + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) + FI +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed unsigned 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[63:0] := MAX_UINT +FOR j := 0 to 7 + i := j*64 + IF k[j] + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) + FI +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[63:0] := MAX_DOUBLE +FOR j := 0 to 7 + i := j*64 + IF k[j] + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) + FI +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +min[31:0] := MAX_FLOAT +FOR j := 0 to 15 + i := j*32 + IF k[j] + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) + FI +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Reduce the packed 32-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +prod[31:0] := 1 +FOR j := 0 to 15 + i := j*32 + IF k[j] + prod[31:0] := prod[31:0] * a[i+31:i] + FI +ENDFOR +RETURN prod[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + + Reduce the packed 64-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +prod[63:0] := 1 +FOR j := 0 to 7 + i := j*64 + IF k[j] + prod[63:0] := prod[63:0] * a[i+63:i] + FI +ENDFOR +RETURN prod[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +prod[63:0] := 1 +FOR j := 0 to 7 + i := j*64 + IF k[j] + prod[63:0] := prod[63:0] * a[i+63:i] + FI +ENDFOR +RETURN prod[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +prod[31:0] := 1 +FOR j := 0 to 15 + i := j*32 + IF k[j] + prod[31:0] := prod[31:0] * a[i+31:i] + FI +ENDFOR +RETURN prod[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Reduce the packed 32-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". + +reduced[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + reduced[31:0] := reduced[31:0] OR a[i+31:i] + FI +ENDFOR +RETURN reduced[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + Reduce the packed 64-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". + +reduced[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + reduced[63:0] := reduced[63:0] OR a[i+63:i] + FI +ENDFOR +RETURN reduced[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + Reduce the packed 32-bit integers in "a" by addition. Returns the sum of all elements in "a". + +sum[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + sum[31:0] := sum[31:0] + a[i+31:i] +ENDFOR +RETURN sum[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + Reduce the packed 64-bit integers in "a" by addition. Returns the sum of all elements in "a". + +sum[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + sum[63:0] := sum[63:0] + a[i+63:i] +ENDFOR +RETURN sum[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +sum[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + sum[63:0] := sum[63:0] + a[i+63:i] +ENDFOR +RETURN sum[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +sum[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + sum[31:0] := sum[31:0] + a[i+31:i] +ENDFOR +RETURN sum[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + Reduce the packed 32-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". + +reduced[31:0] := 0xFFFFFFFF +FOR j := 0 to 15 + i := j*32 + reduced[31:0] := reduced[31:0] AND a[i+31:i] +ENDFOR +RETURN reduced[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + Reduce the packed 64-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". + +reduced[63:0] := 0xFFFFFFFFFFFFFFFF +FOR j := 0 to 7 + i := j*64 + reduced[63:0] := reduced[63:0] AND a[i+63:i] +ENDFOR +RETURN reduced[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +max[31:0] := MIN_INT +FOR j := 0 to 15 + i := j*32 + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +max[63:0] := MIN_INT +FOR j := 0 to 7 + i := j*64 + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed unsigned 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +max[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed unsigned 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +max[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +max[63:0] := MIN_DOUBLE +FOR j := 0 to 7 + i := j*64 + max[63:0] := MAXIMUM(max[63:0], a[i+63:i]) +ENDFOR +RETURN max[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +max[31:0] := MIN_FLOAT +FOR j := 0 to 15 + i := j*32 + max[31:0] := MAXIMUM(max[31:0], a[i+31:i]) +ENDFOR +RETURN max[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +min[31:0] := MAX_INT +FOR j := 0 to 15 + i := j*32 + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +min[63:0] := MAX_INT +FOR j := 0 to 7 + i := j*64 + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed unsigned 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +min[31:0] := MAX_UINT +FOR j := 0 to 15 + i := j*32 + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Special Math Functions + + Reduce the packed unsigned 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +min[63:0] := MAX_UINT +FOR j := 0 to 7 + i := j*64 + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". + +min[63:0] := MAX_DOUBLE +FOR j := 0 to 7 + i := j*64 + min[63:0] := MINIMUM(min[63:0], a[i+63:i]) +ENDFOR +RETURN min[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Special Math Functions + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". + +min[31:0] := MAX_FLOAT +FOR j := 0 to 15 + i := j*32 + min[31:0] := MINIMUM(min[31:0], a[i+31:i]) +ENDFOR +RETURN min[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + Reduce the packed 32-bit integers in "a" by multiplication. Returns the product of all elements in "a". + +prod[31:0] := 1 +FOR j := 0 to 15 + i := j*32 + prod[31:0] := prod[31:0] * a[i+31:i] +ENDFOR +RETURN prod[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Arithmetic + + Reduce the packed 64-bit integers in "a" by multiplication. Returns the product of all elements in "a". + +prod[63:0] := 1 +FOR j := 0 to 7 + i := j*64 + prod[63:0] := prod[63:0] * a[i+63:i] +ENDFOR +RETURN prod[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +prod[63:0] := 1 +FOR j := 0 to 7 + i := j*64 + prod[63:0] := prod[63:0] * a[i+63:i] +ENDFOR +RETURN prod[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Arithmetic + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +prod[31:0] := 1 +FOR j := 0 to 15 + i := j*32 + prod[31:0] := prod[31:0] * a[i+31:i] +ENDFOR +RETURN prod[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + Reduce the packed 32-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". + +reduced[31:0] := 0 +FOR j := 0 to 15 + i := j*32 + reduced[31:0] := reduced[31:0] OR a[i+31:i] +ENDFOR +RETURN reduced[31:0] + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + Reduce the packed 64-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". + +reduced[63:0] := 0 +FOR j := 0 to 7 + i := j*64 + reduced[63:0] := reduced[63:0] OR a[i+63:i] +ENDFOR +RETURN reduced[63:0] + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + Set packed 32-bit integers in "dst" with the repeated 4 element sequence. + +dst[31:0] := d +dst[63:32] := c +dst[95:64] := b +dst[127:96] := a +dst[159:128] := d +dst[191:160] := c +dst[223:192] := b +dst[255:224] := a +dst[287:256] := d +dst[319:288] := c +dst[351:320] := b +dst[383:352] := a +dst[415:384] := d +dst[447:416] := c +dst[479:448] := b +dst[511:480] := a +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + Set packed 64-bit integers in "dst" with the repeated 4 element sequence. + +dst[63:0] := d +dst[127:64] := c +dst[191:128] := b +dst[255:192] := a +dst[319:256] := d +dst[383:320] := c +dst[447:384] := b +dst[511:448] := a +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence. + +dst[63:0] := d +dst[127:64] := c +dst[191:128] := b +dst[255:192] := a +dst[319:256] := d +dst[383:320] := c +dst[447:384] := b +dst[511:448] := a +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence. + +dst[31:0] := d +dst[63:32] := c +dst[95:64] := b +dst[127:96] := a +dst[159:128] := d +dst[191:160] := c +dst[223:192] := b +dst[255:224] := a +dst[287:256] := d +dst[319:288] := c +dst[351:320] := b +dst[383:352] := a +dst[415:384] := d +dst[447:416] := c +dst[479:448] := b +dst[511:480] := a +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 +dst[71:64] := e8 +dst[79:72] := e9 +dst[87:80] := e10 +dst[95:88] := e11 +dst[103:96] := e12 +dst[111:104] := e13 +dst[119:112] := e14 +dst[127:120] := e15 +dst[135:128] := e16 +dst[143:136] := e17 +dst[151:144] := e18 +dst[159:152] := e19 +dst[167:160] := e20 +dst[175:168] := e21 +dst[183:176] := e22 +dst[191:184] := e23 +dst[199:192] := e24 +dst[207:200] := e25 +dst[215:208] := e26 +dst[223:216] := e27 +dst[231:224] := e28 +dst[239:232] := e29 +dst[247:240] := e30 +dst[255:248] := e31 +dst[263:256] := e32 +dst[271:264] := e33 +dst[279:272] := e34 +dst[287:280] := e35 +dst[295:288] := e36 +dst[303:296] := e37 +dst[311:304] := e38 +dst[319:312] := e39 +dst[327:320] := e40 +dst[335:328] := e41 +dst[343:336] := e42 +dst[351:344] := e43 +dst[359:352] := e44 +dst[367:360] := e45 +dst[375:368] := e46 +dst[383:376] := e47 +dst[391:384] := e48 +dst[399:392] := e49 +dst[407:400] := e50 +dst[415:408] := e51 +dst[423:416] := e52 +dst[431:424] := e53 +dst[439:432] := e54 +dst[447:440] := e55 +dst[455:448] := e56 +dst[463:456] := e57 +dst[471:464] := e58 +dst[479:472] := e59 +dst[487:480] := e60 +dst[495:488] := e61 +dst[503:496] := e62 +dst[511:503] := e63 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 +dst[79:64] := e4 +dst[95:80] := e5 +dst[111:96] := e6 +dst[127:112] := e7 +dst[145:128] := e8 +dst[159:144] := e9 +dst[175:160] := e10 +dst[191:176] := e11 +dst[207:192] := e12 +dst[223:208] := e13 +dst[239:224] := e14 +dst[255:240] := e15 +dst[271:256] := e16 +dst[287:272] := e17 +dst[303:288] := e18 +dst[319:304] := e19 +dst[335:320] := e20 +dst[351:336] := e21 +dst[367:352] := e22 +dst[383:368] := e23 +dst[399:384] := e24 +dst[415:400] := e25 +dst[431:416] := e26 +dst[447:432] := e27 +dst[463:448] := e28 +dst[479:464] := e29 +dst[495:480] := e30 +dst[511:496] := e31 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[287:256] := e8 +dst[319:288] := e9 +dst[351:320] := e10 +dst[383:352] := e11 +dst[415:384] := e12 +dst[447:416] := e13 +dst[479:448] := e14 +dst[511:480] := e15 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[319:256] := e4 +dst[383:320] := e5 +dst[447:384] := e6 +dst[511:448] := e7 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[319:256] := e4 +dst[383:320] := e5 +dst[447:384] := e6 +dst[511:448] := e7 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + + + + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[287:256] := e8 +dst[319:288] := e9 +dst[351:320] := e10 +dst[383:352] := e11 +dst[415:384] := e12 +dst[447:416] := e13 +dst[479:448] := e14 +dst[511:480] := e15 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + Set packed 32-bit integers in "dst" with the repeated 4 element sequence in reverse order. + +dst[31:0] := a +dst[63:32] := b +dst[95:64] := c +dst[127:96] := d +dst[159:128] := a +dst[191:160] := b +dst[223:192] := c +dst[255:224] := d +dst[287:256] := a +dst[319:288] := b +dst[351:320] := c +dst[383:352] := d +dst[415:384] := a +dst[447:416] := b +dst[479:448] := c +dst[511:480] := d +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + Set packed 64-bit integers in "dst" with the repeated 4 element sequence in reverse order. + +dst[63:0] := a +dst[127:64] := b +dst[191:128] := c +dst[255:192] := d +dst[319:256] := a +dst[383:320] := b +dst[447:384] := c +dst[511:448] := d +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order. + +dst[63:0] := a +dst[127:64] := b +dst[191:128] := c +dst[255:192] := d +dst[319:256] := a +dst[383:320] := b +dst[447:384] := c +dst[511:448] := d +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order. + +dst[31:0] := a +dst[63:32] := b +dst[95:64] := c +dst[127:96] := d +dst[159:128] := a +dst[191:160] := b +dst[223:192] := c +dst[255:224] := d +dst[287:256] := a +dst[319:288] := b +dst[351:320] := c +dst[383:352] := d +dst[415:384] := a +dst[447:416] := b +dst[479:448] := c +dst[511:480] := d +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e15 +dst[63:32] := e14 +dst[95:64] := e13 +dst[127:96] := e12 +dst[159:128] := e11 +dst[191:160] := e10 +dst[223:192] := e9 +dst[255:224] := e8 +dst[287:256] := e7 +dst[319:288] := e6 +dst[351:320] := e5 +dst[383:352] := e4 +dst[415:384] := e3 +dst[447:416] := e2 +dst[479:448] := e1 +dst[511:480] := e0 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Set + + + + + + + + + Set packed 64-bit integers in "dst" with the supplied values in reverse order. + +dst[63:0] := e7 +dst[127:64] := e6 +dst[191:128] := e5 +dst[255:192] := e4 +dst[319:256] := e3 +dst[383:320] := e2 +dst[447:384] := e1 +dst[511:448] := e0 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[63:0] := e7 +dst[127:64] := e6 +dst[191:128] := e5 +dst[255:192] := e4 +dst[319:256] := e3 +dst[383:320] := e2 +dst[447:384] := e1 +dst[511:448] := e0 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + + + + + + + + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e15 +dst[63:32] := e14 +dst[95:64] := e13 +dst[127:96] := e12 +dst[159:128] := e11 +dst[191:160] := e10 +dst[223:192] := e9 +dst[255:224] := e8 +dst[287:256] := e7 +dst[319:288] := e6 +dst[351:320] := e5 +dst[383:352] := e4 +dst[415:384] := e3 +dst[447:416] := e2 +dst[479:448] := e1 +dst[511:480] := e0 +dst[MAX:512] := 0 + +
immintrin.h
+
+ + AVX512F + Set + + Return vector of type __m512 with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + Return vector of type __m512i with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + Return vector of type __m512d with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Set + Return vector of type __m512 with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Set + Return vector of type __m512i with all elements set to zero. + +dst[MAX:0] := 0 + + +
immintrin.h
+
+ + AVX512F + General Support + + Return vector of type __m512 with undefined elements. +
immintrin.h
+
+ + Integer + AVX512F + General Support + Return vector of type __m512i with undefined elements. +
immintrin.h
+
+ + Floating Point + AVX512F + General Support + Return vector of type __m512d with undefined elements. +
immintrin.h
+
+ + Floating Point + AVX512F + General Support + Return vector of type __m512 with undefined elements. +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + Prefetch single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache. "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j:= 0 to 7 + i := j*64; + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], hint, RFO=0); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + + Prefetch single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are only brought into cache when their corresponding mask bit is set). "scale" should be 1, 2, 4 or 8.. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j:= 0 to 7 + i := j*64; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], hint, RFO=0); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + Prefetch single-precision (32-bit) floating-point elements with intent to write into memory using 64-bit indices. Elements are prefetched into cache level "hint", where "hint" is 0 or 1. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64; + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=1); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + + Prefetch single-precision (32-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=1); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + Prefetch double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache. "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 7 + i := j*32; + Prefetch([base_addr + SignExtend(vindex[i+31:i]) * scale], hint, RFO=0); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + + Prefetch double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 7 + i := j*32; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+31:i]) * scale], hint, RFO=0); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Load + + + + + + Prefetch single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 15 + i := j*32; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+31:i]) * scale], hint, RFO=0); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + Prefetch double-precision (64-bit) floating-point elements with intent to write using 32-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 TO 7 + i := j*32; + Prefetch([base_addr + SignExtend(vindex[i+31:i]) * scale], Level=hint, RFO=1); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + + Prefetch double-precision (64-bit) floating-point elements with intent to write using 32-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 TO 7 + i := j*32; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+31:i]) * scale], Level=hint, RFO=1); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + Prefetch double-precision (64-bit) floating-point elements from memory into cache level specified by "hint" using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 7 + i := j*64; + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=0); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Load + + + + + + Prefetch double-precision (64-bit) floating-point elements from memory into cache level specified by "hint" using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Prefetched elements are merged in cache using writemask "k" (elements are copied from memory when the corresponding mask bit is set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 7 + i := j*64; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=0); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + Prefetch double-precision (64-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64; + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=1); +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512PF + Store + + + + + + Prefetch double-precision (64-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64; + IF mask[j] THEN + Prefetch([base_addr + SignExtend(vindex[i+63:i]) * scale], Level=hint, RFO=1); + FI +ENDFOR; + + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := EXP_2_23_SP(a[i+31:i]); + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := EXP_2_23_DP(a[i+63:i]); + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +dst[63:0] := RCP_28_DP(1.0/b[63:0]); +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +dst[63:0] := RCP_28_DP(1.0/b[63:0]; +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[63:0] := RCP_28_DP(1.0/b[63:0]; +ELSE + dst[63:0] := src[63:0]; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[63:0] := RCP_28_DP(1.0/b[63:0]; +ELSE + dst[63:0] := src[63:0]; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[63:0] := RCP_28_DP(1.0/b[63:0]; +ELSE + dst[63:0] := 0; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[63:0] := RCP_28_DP(1.0/b[63:0]; +ELSE + dst[63:0] := 0; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +dst[31:0] := RCP_28_DP(1.0/b[31:0]); +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +dst[31:0] := RCP_28_DP(1.0/b[31:0]; +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[31:0] := RCP_28_DP(1.0/b[31:0]; +ELSE + dst[31:0] := src[31:0]; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[31:0] := RCP_28_DP(1.0/b[31:0]; +ELSE + dst[31:0] := src[31:0]; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[31:0] := RCP_28_DP(1.0/b[31:0]; +ELSE + dst[31:0] := 0; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[31:0] := RCP_28_DP(1.0/b[31:0]; +ELSE + dst[31:0] := 0; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := RCP_28_SP(1.0/a[i+31:i]; + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := RCP_28_SP(1.0/a[i+63:i]; + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +dst[63:0] := (1.0/SQRT(b[63:0])); +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +dst[63:0] := (1.0/SQRT(b[63:0])); +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[63:0] := (1.0/SQRT(b[63:0])); +ELSE + dst[63:0] := src[63:0]; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[63:0] := (1.0/SQRT(b[63:0])); +ELSE + dst[63:0] := src[63:0]; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[63:0] := (1.0/SQRT(b[63:0])); +ELSE + dst[63:0] := 0; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[63:0] := (1.0/SQRT(b[63:0])); +ELSE + dst[63:0] := 0; +FI +dst[127:64] := a[127:64]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +dst[31:0] := (1.0/SQRT(b[31:0])); +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +dst[31:0] := (1.0/SQRT(b[31:0])); +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[31:0] := (1.0/SQRT(b[31:0])); +ELSE + dst[31:0] := src[31:0]; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[31:0] := (1.0/SQRT(b[31:0])); +ELSE + dst[31:0] := src[31:0]; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +IF k[0] THEN + dst[31:0] := (1.0/SQRT(b[31:0])); +ELSE + dst[31:0] := 0; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. + +IF k[0] THEN + dst[31:0] := (1.0/SQRT(b[31:0])); +ELSE + dst[31:0] := 0; +FI +dst[127:32] := a[127:32]; +dst[MAX:128] := 0; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); + ELSE + dst[i+31:i] := src[i+31:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 15 + i := j*32; + IF k[j] THEN + dst[i+31:i] := (1.0/SQRT(a[i+31:i])); + ELSE + dst[i+31:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); + ELSE + dst[i+63:i] := src[i+63:i]; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [round_note] + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Floating Point + AVX512ER + Elementary Math Functions + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. + +FOR j := 0 to 7 + i := j*64; + IF k[j] THEN + dst[i+63:i] := (1.0/SQRT(a[i+63:i])); + ELSE + dst[i+63:i] := 0; + FI +ENDFOR; + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 32) AND a[tmp] = 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + Integer + BMI1 + Bit Manipulation + + Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 64) AND a[tmp] = 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to single-precision (32-bit) floating-point elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 15 + i := j*32 + CASE bc OF + _MM_BROADCAST32_NONE: + CASE conv OF + _MM_UPCONV_PS_NONE: + n := j*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + _MM_BROADCAST_1X16: + CASE conv OF + _MM_UPCONV_PS_NONE: + n := j*32 + dst[i+31:i] := addr[31:0] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[15:0]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[7:0]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[7:0]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[15:0]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[15:0]) + ESAC + _MM_BROADCAST_4X16: + mod := j%4 + CASE conv OF + _MM_UPCONV_PS_NONE: + n := mod*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_PS_FLOAT16: + n := mod*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := mod*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := mod*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := mod*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := mod*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + ESAC +ENDFOR +dst[MAX:512] := 0 + + + + 
+
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + + Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to single-precision (32-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 15 + i := j*32 + IF k[j] + CASE bc OF + _MM_BROADCAST32_NONE: + CASE conv OF + _MM_UPCONV_PS_NONE: + n := j*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + _MM_BROADCAST_1X16: + CASE conv OF + _MM_UPCONV_PS_NONE: + n := j*32 + dst[i+31:i] := addr[31:0] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[15:0]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[7:0]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[7:0]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[15:0]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[15:0]) + ESAC + _MM_BROADCAST_4X16: + mod := j%4 + CASE conv OF + _MM_UPCONV_PS_NONE: + n := mod*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_PS_FLOAT16: + n := mod*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := mod*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := mod*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := mod*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: 
+ n := mod*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 32-bit integer elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 15 + i := j*32 + CASE bc OF + _MM_BROADCAST32_NONE: + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := j*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC + _MM_BROADCAST_1X16: + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := j*32 + dst[i+31:i] := addr[31:0] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[7:0]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[7:0]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[15:0]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[15:0]) + ESAC + _MM_BROADCAST_4X16: + mod := j%4 + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := mod*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_EPI32_UINT8: + n := mod*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := mod*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := mod*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := mod*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC + ESAC +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + + Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 32-bit integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 15 + i := j*32 + IF k[j] + CASE bc OF + _MM_BROADCAST32_NONE: + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := j*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC + _MM_BROADCAST_1X16: + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := j*32 + dst[i+31:i] := addr[31:0] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[7:0]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[7:0]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[15:0]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[15:0]) + ESAC + _MM_BROADCAST_4X16: + mod := j%4 + CASE conv OF + _MM_UPCONV_EPI32_NONE: + n := mod*32 + dst[i+31:i] := addr[n+31:n] + _MM_UPCONV_EPI32_UINT8: + n := mod*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := mod*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := mod*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := mod*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to double-precision (64-bit) floating-point elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 7 + i := j*64 + CASE bc OF + _MM_BROADCAST64_NONE: + CASE conv OF + _MM_UPCONV_PD_NONE: + n := j*64 + dst[i+63:i] := addr[n+63:n] + ESAC + _MM_BROADCAST_1X8: + CASE conv OF + _MM_UPCONV_PD_NONE: + n := j*64 + dst[i+63:i] := addr[63:0] + ESAC + _MM_BROADCAST_4X8: + mod := j%4 + CASE conv OF + _MM_UPCONV_PD_NONE: + n := mod*64 + dst[i+63:i] := addr[n+63:n] + ESAC + ESAC +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + + Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 7 + i := j*64 + IF k[j] + CASE bc OF + _MM_BROADCAST64_NONE: + CASE conv OF + _MM_UPCONV_PD_NONE: + n := j*64 + dst[i+63:i] := addr[n+63:n] + ESAC + _MM_BROADCAST_1X8: + CASE conv OF + _MM_UPCONV_PD_NONE: + n := j*64 + dst[i+63:i] := addr[63:0] + ESAC + _MM_BROADCAST_4X8: + mod := j%4 + CASE conv OF + _MM_UPCONV_PD_NONE: + n := mod*64 + dst[i+63:i] := addr[n+63:n] + ESAC + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 64-bit integer elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 7 + i := j*64 + CASE bc OF + _MM_BROADCAST64_NONE: + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := j*64 + dst[i+63:i] := addr[n+63:n] + ESAC + _MM_BROADCAST_1X8: + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := j*64 + dst[i+63:i] := addr[63:0] + ESAC + _MM_BROADCAST_4X8: + mod := j%4 + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := mod*64 + dst[i+63:i] := addr[n+63:n] + ESAC + ESAC +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + + Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 64-bit integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr = MEM[mt] +FOR j := 0 to 7 + i := j*64 + IF k[j] + CASE bc OF + _MM_BROADCAST64_NONE: + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := j*64 + dst[i+63:i] := addr[n+63:n] + ESAC + _MM_BROADCAST_1X8: + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := j*64 + dst[i+63:i] := addr[63:0] + ESAC + _MM_BROADCAST_4X8: + mod := j%4 + CASE conv OF + _MM_UPCONV_EPI64_NONE: + n := mod*64 + dst[i+63:i] := addr[n+63:n] + ESAC + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + +
immintrin.h
+
+ + Floating Point + KNCNI + Swizzle + + + Performs a swizzle transformation of each of the four groups of packed 4xsingle-precision (32-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst". + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 7 + i := j*64 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+31:i] + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+95:i+64] + dst[i+63:i+32] := v[i+127:i+96] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+63:i+32] + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+31:i] + dst[i+63:i+32] := v[i+31:i] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+31:i] + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+63:i+32] + dst[i+95:i+64] := v[i+63:i+32] + dst[i+127:i+96] := v[i+63:i+32] + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+95:i+64] + dst[i+63:i+32] := v[i+95:i+64] + dst[i+95:i+64] := v[i+95:i+64] + dst[i+127:i+96] := v[i+95:i+64] + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+127:i+96] + dst[i+63:i+32] := v[i+127:i+96] + dst[i+95:i+64] := v[i+127:i+96] + dst[i+127:i+96] := v[i+127:i+96] + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+95:i+64] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+127:i+96] + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Swizzle + + + Performs a swizzle transformation of each of the two groups of packed 4x double-precision (64-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst". + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+63:i] + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+191:i+128] + dst[i+127:i+64] := v[i+255:i+192] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+127:i+64] + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+63:i] + dst[i+127:i+64] := v[i+63:i] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+63:i] + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+127:i+64] + dst[i+191:i+128] := v[i+127:i+64] + dst[i+255:i+192] := v[i+127:i+64] + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+191:i+128] + dst[i+127:i+64] := v[i+191:i+128] + dst[i+191:i+128] := v[i+191:i+128] + dst[i+255:i+192] := v[i+191:i+128] + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+255:i+192] + dst[i+127:i+64] := v[i+255:i+192] + dst[i+191:i+128] := v[i+255:i+192] + dst[i+255:i+192] := v[i+255:i+192] + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+191:i+128] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+255:i+192] + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + Performs a swizzle transformation of each of the four groups of packed 4x 32-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst". + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 7 + i := j*64 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+31:i] + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+95:i+64] + dst[i+63:i+32] := v[i+127:i+96] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+63:i+32] + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+31:i] + dst[i+63:i+32] := v[i+31:i] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+31:i] + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+63:i+32] + dst[i+95:i+64] := v[i+63:i+32] + dst[i+127:i+96] := v[i+63:i+32] + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+95:i+64] + dst[i+63:i+32] := v[i+95:i+64] + dst[i+95:i+64] := v[i+95:i+64] + dst[i+127:i+96] := v[i+95:i+64] + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+127:i+96] + dst[i+63:i+32] := v[i+127:i+96] + dst[i+95:i+64] := v[i+127:i+96] + dst[i+127:i+96] := v[i+127:i+96] + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 3 + i := j*128 + dst[i+31:i] := v[i+63:i+32] + dst[i+63:i+32] := v[i+95:i+64] + dst[i+95:i+64] := v[i+31:i] + dst[i+127:i+96] := v[i+127:i+96] + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + Performs a swizzle transformation of each of the two groups of packed 4x64-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst". + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+63:i] + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+191:i+128] + dst[i+127:i+64] := v[i+255:i+192] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+127:i+64] + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+63:i] + dst[i+127:i+64] := v[i+63:i] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+63:i] + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+127:i+64] + dst[i+191:i+128] := v[i+127:i+64] + dst[i+255:i+192] := v[i+127:i+64] + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+191:i+128] + dst[i+127:i+64] := v[i+191:i+128] + dst[i+191:i+128] := v[i+191:i+128] + dst[i+255:i+192] := v[i+191:i+128] + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+255:i+192] + dst[i+127:i+64] := v[i+255:i+192] + dst[i+191:i+128] := v[i+255:i+192] + dst[i+255:i+192] := v[i+255:i+192] + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 1 + i := j*256 + dst[i+63:i] := v[i+127:i+64] + dst[i+127:i+64] := v[i+191:i+128] + dst[i+191:i+128] := v[i+63:i] + dst[i+255:i+192] := v[i+255:i+192] + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Swizzle + + + + + Performs a swizzle transformation of each of the four groups of packed 4x single-precision (32-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 7 + i := j*64 + IF k[j*2] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*2+1] + dst[i+63:i+32] := v[i+31:i] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+95:i+64] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+127:i+96] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+63:i+32] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+31:i] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+31:i] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+63:i+32] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+63:i+32] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+63:i+32] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 3 + i := j*128 + IF 
k[j*4] + dst[i+31:i] := v[i+95:i+64] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+95:i+64] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+95:i+64] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+95:i+64] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+127:i+96] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+127:i+96] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+127:i+96] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+127:i+96] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+95:i+64] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+127:i+96] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Swizzle + + + + + Performs a swizzle transformation of each of the two groups of packed 4x double-precision (64-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 3 + i := j*64 + IF k[j*2] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*2+1] + dst[i+127:i+64] := v[i+63:i] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+191:i+128] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+255:i+192] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+127:i+64] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+63:i] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+63:i] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+127:i+64] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+127:i+64] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+127:i+64] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR 
+_MM_SWIZ_REG_CCCC: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+191:i+128] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+191:i+128] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+191:i+128] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+191:i+128] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+255:i+192] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+255:i+192] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+255:i+192] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+255:i+192] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+191:i+128] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+255:i+192] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + + + Performs a swizzle transformation of each of the four groups of packed 4x32-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 7 + i := j*64 + IF k[j*2] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*2+1] + dst[i+63:i+32] := v[i+31:i] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+95:i+64] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+127:i+96] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+63:i+32] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+31:i] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+31:i] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+63:i+32] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+63:i+32] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+63:i+32] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+95:i+64] + 
ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+95:i+64] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+95:i+64] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+95:i+64] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+127:i+96] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+127:i+96] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+127:i+96] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+127:i+96] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 3 + i := j*128 + IF k[j*4] + dst[i+31:i] := v[i+63:i+32] + ELSE + dst[i+31:i] := src[i+31:i] + FI + IF k[j*4+1] + dst[i+63:i+32] := v[i+95:i+64] + ELSE + dst[i+63:i+32] := src[i+63:i+32] + FI + IF k[j*4+2] + dst[i+95:i+64] := v[i+31:i] + ELSE + dst[i+95:i+64] := src[i+95:i+64] + FI + IF k[j*4+3] + dst[i+127:i+96] := v[i+127:i+96] + ELSE + dst[i+127:i+96] := src[i+127:i+96] + FI + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + + + Performs a swizzle transformation of each of the two groups of packed 4x64-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE s OF +_MM_SWIZ_REG_NONE: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_DCBA: + dst[511:0] := v[511:0] +_MM_SWIZ_REG_CDAB: + FOR j := 0 to 3 + i := j*64 + IF k[j*2] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*2+1] + dst[i+127:i+64] := v[i+63:i] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + ENDFOR +_MM_SWIZ_REG_BADC: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+191:i+128] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+255:i+192] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+127:i+64] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_AAAA: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+63:i] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+63:i] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_BBBB: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+127:i+64] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+127:i+64] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+127:i+64] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_CCCC: + FOR j := 0 to 1 + i := 
j*256 + IF k[j*4] + dst[i+63:i] := v[i+191:i+128] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+191:i+128] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+191:i+128] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+191:i+128] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_DDDD: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+255:i+192] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+255:i+192] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+255:i+192] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+255:i+192] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +_MM_SWIZ_REG_DACB: + FOR j := 0 to 1 + i := j*256 + IF k[j*4] + dst[i+63:i] := v[i+127:i+64] + ELSE + dst[i+63:i] := src[i+63:i] + FI + IF k[j*4+1] + dst[i+127:i+64] := v[i+191:i+128] + ELSE + dst[i+127:i+64] := src[i+127:i+64] + FI + IF k[j*4+2] + dst[i+191:i+128] := v[i+63:i] + ELSE + dst[i+191:i+128] := src[i+191:i+128] + FI + IF k[j*4+3] + dst[i+255:i+192] := v[i+255:i+192] + ELSE + dst[i+255:i+192] := src[i+255:i+192] + FI + ENDFOR +ESAC +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Downconverts packed single-precision (32-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 15 + i := j*32 + CASE conv OF + _MM_DOWNCONV_PS_NONE: + addr[i+31:i] := v[i+31:i] + _MM_DOWNCONV_PS_FLOAT16: + n := j*16 + addr[n+15:n] := Float32ToFloat16(v[i+31:i]) + _MM_DOWNCONV_PS_UINT8: + n := j*8 + addr[n+7:n] := Float32ToUInt8(v[i+31:i]) + _MM_DOWNCONV_PS_SINT8: + n := j*8 + addr[n+7:n] := Float32ToSInt8(v[i+31:i]) + _MM_DOWNCONV_PS_UINT16: + n := j*16 + addr[n+15:n] := Float32ToUInt16(v[i+31:i]) + _MM_DOWNCONV_PS_SINT16: + n := j*16 + addr[n+15:n] := Float32ToSInt16(v[i+31:i]) + ESAC +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Downconverts packed 32-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 15 + i := j*32 + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := Int32ToUInt8(v[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := Int32ToSInt8(v[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := Int32ToUInt16(v[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := Int32ToSInt16(v[i+31:i]) + ESAC +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Downconverts packed double-precision (64-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + CASE conv OF + _MM_DOWNCONV_PS_NONE: + addr[i+63:i] := v[i+63:i] + ESAC +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Downconverts packed 64-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v[i+63:i] + ESAC +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Downconverts packed single-precision (32-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" using writemask "k" (elements are not written to memory when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + CASE conv OF + _MM_DOWNCONV_PS_NONE: + mt[i+31:i] := v[i+31:i] + _MM_DOWNCONV_PS_FLOAT16: + n := j*16 + mt[n+15:n] := Float32ToFloat16(v[i+31:i]) + _MM_DOWNCONV_PS_UINT8: + n := j*8 + mt[n+7:n] := Float32ToUInt8(v[i+31:i]) + _MM_DOWNCONV_PS_SINT8: + n := j*8 + mt[n+7:n] := Float32ToSInt8(v[i+31:i]) + _MM_DOWNCONV_PS_UINT16: + n := j*16 + mt[n+15:n] := Float32ToUInt16(v[i+31:i]) + _MM_DOWNCONV_PS_SINT16: + n := j*16 + mt[n+15:n] := Float32ToSInt16(v[i+31:i]) + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Downconverts packed double-precision (64-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + CASE conv OF + _MM_DOWNCONV_PD_NONE: + IF k[j] + mt[i+63:i] := v[i+63:i] + FI + ESAC +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Downconverts packed 32-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 15 + i := j*32 + IF k[j] + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := Int32ToUInt8(v[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := Int32ToSInt8(v[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := Int32ToUInt16(v[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := Int32ToSInt16(v[i+31:i]) + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Downconverts packed 64-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + IF k[j] + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v[i+63:i] + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed single-precision (32-bit) floating-point elements from "v" to memory address "mt" with a no-read hint to the processor. + +addr := MEM[mt] +FOR j := 0 to 15 + i := j*32 + addr[i+31:i] := v[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed double-precision (64-bit) floating-point elements from "v" to memory address "mt" with a no-read hint to the processor. + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + addr[i+63:i] := v[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed single-precision (32-bit) floating-point elements from "v" to memory address "mt" with a no-read hint and using a weakly-ordered memory consistency model (stores performed with this function are not globally ordered, and subsequent stores from the same thread can be observed before them). + +addr := MEM[mt] +FOR j := 0 to 15 + i := j*32 + addr[i+31:i] := v[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed double-precision (64-bit) floating-point elements from "v" to memory address "mt" with a no-read hint and using a weakly-ordered memory consistency model (stores performed with this function are not globally ordered, and subsequent stores from the same thread can be observed before them). + +addr := MEM[mt] +FOR j := 0 to 7 + i := j*64 + addr[i+63:i] := v[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element addition of packed 32-bit integers in "v2" and "v3" and the corresponding bit in "k2", storing the result of the addition in "dst" and the result of the carry in "k2_res". + +FOR j := 0 to 15 + i := j*32 + k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i] + k2[j]) + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + k2[j] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element addition of packed 32-bit integers in "v2" and "v3" and the corresponding bit in "k2", storing the result of the addition in "dst" and the result of the carry in "k2_res" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i] + k2[j]) + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + k2[j] + ELSE + dst[i+31:i] := v2[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Performs element-by-element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Performs element-by-element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Performs element-by-element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Performs element-by-element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Performs element by element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Performs element by element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Performs element by element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Performs element by element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := v3[i+63:i] - v2[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := v3[i+63:i] - v2[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := v3[i+63:i] - v2[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := v3[i+63:i] - v2[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v3[i+31:i] - v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3" storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set) + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + Performs element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the resultant carry in "k2_res" (carry flag) and the addition results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the resultant carry in "k2_res" (carry flag) and the addition results in "dst" using writemask "k" (elements are copied from "v2" and "k_old" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + ELSE + dst[i+31:i] := v2[i+31:i] + k2_res[j] := k_old[j] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + Performs an element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs an element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + + Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + sign[j] := v2[i+31:i] & v3[i+31:i] & 0x80000000 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v3" from "v2", storing the results in "dst" and the nth borrow bit in the nth position of "borrow" (borrow flag). + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] - v3[i+31:i] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v3" from "v2", storing the results in "dst" and the nth borrow bit in the nth position of "borrow" (borrow flag). Results are stored using writemask "k" (elements are copied from "v2" and "k_old" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] - v3[i+31:i] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + borrow[j] := k_old[j] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3", storing the results in "dst" and "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] + borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3", storing the results in "dst" and "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are written using writemask "k" (elements are copied from "k" to "k_old" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + diff := v3[i+31:i] - v2[i+31:i] + borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i]) + dst[i+31:i] := diff + v2[i+31:i] := diff + ELSE + borrow[j] := k_old[j] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v3" as well as the corresponding bit from "k" from "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v2[i+31:i] - v3[i+31:i] - k[j] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k[j]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v3" as well as the corresponding bit from "k2" from "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + dst[i+31:i] := v2[i+31:i] - v3[i+31:i] - k2[j] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k2[j]) + ELSE + dst[i+31:i] := v2[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v2" as well as the corresponding bit from "k" from "v3". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] - k[j] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k[j]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + + Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v2" as well as the corresponding bit from "k2" from "v3". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + dst[i+31:i] := v3[i+31:i] - v2[i+31:i] - k2[j] + borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k[j]) + ELSE + dst[i+31:i] := v2[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Logical + + + + + Performs element-by-element bitwise AND between packed 32-bit integer elements of "v2" and "v3", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := v2[i+31:i] & v3[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Convert + + + Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed single-precision (32-bit) floating-point elements, storing the results in "dst". Results are written to the lower half of "dst", and the upper half locations are set to '0'. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + k := j*32 + dst[k+31:k] := Float64ToFloat32(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Convert + + + + + Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed single-precision (32-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are written to the lower half of "dst", and the upper half locations are set to '0'. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Float64ToFloat32(v2[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements, storing the results in "dst". Results are written to the lower half of "dst", and the upper half locations are set to '0'. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + k := j*32 + dst[k+31:k] := Float64ToInt32(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + + Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are written to the lower half of "dst", and the upper half locations are set to '0'. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Float64ToInt32(v2[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Convert + + Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst". + +FOR j := 0 to 7 + i := j*32 + k := j*64 + dst[k+63:k] := Float32ToFloat64(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Convert + + + + Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[l+63:l] := Float32ToFloat64(v2[i+31:i]) + ELSE + dst[l+63:l] := src[l+63:l]: + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + Performs element-by-element conversion of packed single-precision (32-bit) floating-point elements in "v2" to packed 32-bit integer elements and performs an optional exponent adjust using "expadj", storing the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := Float32ToInt32(v2[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + Performs element-by-element conversion of packed single-precision (32-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements and performing an optional exponent adjust using "expadj", storing the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := Float32ToUInt32(v2[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] 0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] 4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] 5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] 8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] 16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] 24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] 31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] 32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Convert + + Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst". + +FOR j := 0 to 7 + i := j*32 + l := j*64 + dst[l+63:l] := Int32ToFloat64(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Convert + + + + Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := j*64 + IF k[j] + dst[k+63:k] := Int32ToFloat64(v2[i+31:i]) + ELSE + dst[n+63:n] := src[n+63:n] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Convert + + Performs element-by-element conversion of the lower half of packed 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst". + +FOR j := 0 to 7 + i := j*32 + k := j*64 + dst[k+63:k] := UInt32ToFloat64(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512F/KNCNI + Convert + + + + Performs element-by-element conversion of the lower half of 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*64 + IF k[j] + dst[l+63:l] := UInt32ToFloat64(v2[i+31:i]) + ELSE + dst[l+63:l] := src[l+63:l] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + Performs element-by-element conversion of packed 32-bit unsigned integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := UInt32ToFloat32(v2[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + + + Performs element-by-element conversion of packed 32-bit unsigned integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Int32ToFloat32(v2[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + Approximates the base-2 exponent of the packed single-precision (32-bit) floating-point elements in "v2" with eight bits for sign and magnitude and 24 bits for the fractional part. Results are stored in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := exp223(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + + + Approximates the base-2 exponent of the packed single-precision (32-bit) floating-point elements in "v2" with eight bits for sign and magnitude and 24 bits for the fractional part. Results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := exp223(v2[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + Fixes up NaN's from packed double-precision (64-bit) floating-point elements in "v1" and "v2", storing the results in "dst" and storing the quietized NaN's from "v1" in "v3". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FixupNaNs(v1[i+63:i], v2[i+63:i]) + v3[i+63:i] := QuietizeNaNs(v1[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + + Fixes up NaN's from packed double-precision (64-bit) floating-point elements in "v1" and "v2", storing the results in "dst" using writemask "k" (only elements whose corresponding mask bit is set are used in the computation). Quietized NaN's from "v1" are stored in "v3". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FixupNaNs(v1[i+63:i], v2[i+63:i]) + v3[i+63:i] := QuietizeNaNs(v1[i+63:i]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + Fixes up NaN's from packed single-precision (32-bit) floating-point elements in "v1" and "v2", storing the results in "dst" and storing the quietized NaN's from "v1" in "v3". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FixupNaNs(v1[i+31:i], v2[i+31:i]) + v3[i+31:i] := QuietizeNaNs(v1[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + + Fixes up NaN's from packed single-precision (32-bit) floating-point elements in "v1" and "v2", storing the results in "dst" using writemask "k" (only elements whose corresponding mask bit is set are used in the computation). Quietized NaN's from "v1" are stored in "v3". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FixupNaNs(v1[i+31:i], v2[i+31:i]) + v3[i+31:i] := QuietizeNaNs(v1[i+31:i]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + Up-converts 16 memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst". AVX512 supports _MM_UPCONV_EPI32_NONE. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_UPCONV_EPI32_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_EPI32_UINT8: + n := j*7 + dst[i+31:i] := UInt8ToUInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*7 + dst[i+31:i] := Int8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToUInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := Int16ToInt32(addr[n+15:n]) + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + + + Up-converts 16 single-precision (32-bit) memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). AVX512 supports _MM_UPCONV_EPI32_NONE. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_UPCONV_EPI32_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_EPI32_UINT8: + n := j*7 + dst[i+31:i] := UInt8ToUInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*7 + dst[i+31:i] := Int8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToUInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := Int16ToInt32(addr[n+15:n]) + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + Up-converts 8 double-precision (64-bit) memory locations starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_UPCONV_EPI64_NONE: dst[i+63:i] := addr[i+63:i] + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + + + Up-converts 8 double-precision (64-bit) memory locations starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + IF k[j] + CASE conv OF + _MM_UPCONV_EPI64_NONE: dst[i+63:i] := addr[i+63:i] + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + Up-converts 16 memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in "dst". + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_UPCONV_PS_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + + + Up-converts 16 single-precision (32-bit) memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_UPCONV_PS_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + Up-converts 8 double-precision (64-bit) floating-point elements in memory locations starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_UPCONV_PD_NONE: dst[i+63:i] := addr[i+63:i] + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + + + Up-converts 8 double-precision (64-bit) floating-point elements in memory locations starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + IF k[j] + CASE conv OF + _MM_UPCONV_PD_NONE: dst[i+63:i] := addr[i+63:i] + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Load + + + + + + Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "mv" and 32-bit integer index vector "index" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. +The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + ESAC +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Load + + + + + + + Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "mv" and 32-bit integer index vector "index" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. +The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] THEN + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + ESAC + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + + Down-converts 16 packed single-precision (32-bit) floating-point elements in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv". + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_DOWNCONV_PS_NONE: + n := j*32 + addr[i+31:i] := v1[n+31:n] + _MM_DOWNCONV_PS_FLOAT16: + i := j*16 + addr[i+15:i] := Float32ToFloat16(v1[n+31:n]) + _MM_DOWNCONV_PS_UINT8: + i := j*8 + addr[i+7:i] := Float32ToUInt8(v1[n+31:n]) + _MM_DOWNCONV_PS_SINT8: + i := j*8 + addr[i+7:i] := Float32ToSInt8(v1[n+31:n]) + _MM_DOWNCONV_PS_UINT16: + i := j*8 + addr[i+15:i] := Float32ToUInt16(v1[n+31:n]) + _MM_DOWNCONV_PS_SINT16: + i := j*8 + addr[i+15:i] := Float32ToSInt16(v1[n+31:n]) + ESAC +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + + + Down-converts 16 packed single-precision (32-bit) floating-point elements in "v1" according to "conv" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using writemask "k" (elements are written only when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + CASE conv OF + _MM_DOWNCONV_PS_NONE: + n := j*32 + addr[i+31:i] := v1[n+31:n] + _MM_DOWNCONV_PS_FLOAT16: + i := j*16 + addr[i+15:i] := Float32ToFloat16(v1[n+31:n]) + _MM_DOWNCONV_PS_UINT8: + i := j*8 + addr[i+7:i] := Float32ToUInt8(v1[n+31:n]) + _MM_DOWNCONV_PS_SINT8: + i := j*8 + addr[i+7:i] := Float32ToSInt8(v1[n+31:n]) + _MM_DOWNCONV_PS_UINT16: + i := j*8 + addr[i+15:i] := Float32ToUInt16(v1[n+31:n]) + _MM_DOWNCONV_PS_SINT16: + i := j*8 + addr[i+15:i] := Float32ToSInt16(v1[n+31:n]) + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + + Down-converts 8 packed double-precision (64-bit) floating-point elements in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_PD_NONE: addr[i+63:i] := v1[i+63:i] + ESAC +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + + + Down-converts 8 packed double-precision (64-bit) floating-point elements in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory. + +FOR j := 0 to 7 + IF k[j] + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_PD_NONE: addr[i+63:i] := v1[i+63:i] + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + + + Down-converts 8 packed 64-bit integer elements in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v1[i+63:i] + ESAC +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + + + + Down-converts 8 packed 64-bit integer elements in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using "conv". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory. + +FOR j := 0 to 7 + IF k[j] + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v1[i+63:i] + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Store + + + + + + Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "mv" and 32-bit integer index vector "index" with scale "scale" to L1 or L2 level of cache depending on the value of "hint", with a request for exclusive ownership. The "hint" parameter may be one of the following: _MM_HINT_T0 = 1 for prefetching to L1 cache, _MM_HINT_T1 = 2 for prefetching to L2 cache, _MM_HINT_T2 = 3 for prefetching to L2 cache non-temporal, _MM_HINT_NTA = 0 for prefetching to L1 cache non-temporal. The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent scatter intrinsic. + +cachev := 0 +FOR j := 0 to 15 + i := j*32 + addr := MEM[mv + index[j] * scale] + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + _MM_HINT_T2: PrefetchL2WithT1HintNonTemporal(addr[i+31:i]) + _MM_HINT_NTA: PrefetchL1WithT0HintNonTemporal(addr[i+31:i]) + ESAC +ENDFOR + + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Store + + + + + + + Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "mv" and 32-bit integer index vector "index" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. +The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic. Only those elements whose corresponding mask bit in "k" is set are loaded into cache. + +cachev := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + ESAC + FI +ENDFOR + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_EPI32_UINT8: RETURN UInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_SINT8: RETURN SInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_UINT16: RETURN UInt16ToInt32(MEM[addr + 2*offset]) + _MM_UPCONV_EPI32_SINT16: RETURN SInt16ToInt32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN 4 + _MM_UPCONV_EPI32_UINT8: RETURN 1 + _MM_UPCONV_EPI32_SINT8: RETURN 1 + _MM_UPCONV_EPI32_UINT16: RETURN 2 + _MM_UPCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_EPI32_UINT8: RETURN UInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_SINT8: RETURN SInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_UINT16: RETURN UInt16ToInt32(MEM[addr + 2*offset]) + _MM_UPCONV_EPI32_SINT16: RETURN SInt16ToInt32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN 4 + _MM_UPCONV_EPI32_UINT8: RETURN 1 + _MM_UPCONV_EPI32_SINT8: RETURN 1 + _MM_UPCONV_EPI32_UINT16: RETURN 2 + _MM_UPCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_EPI32_UINT8: RETURN UInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_SINT8: RETURN SInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_UINT16: RETURN UInt16ToInt32(MEM[addr + 2*offset]) + _MM_UPCONV_EPI32_SINT16: RETURN SInt16ToInt32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN 4 + _MM_UPCONV_EPI32_UINT8: RETURN 1 + _MM_UPCONV_EPI32_SINT8: RETURN 1 + _MM_UPCONV_EPI32_UINT16: RETURN 2 + _MM_UPCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_EPI32_UINT8: RETURN UInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_SINT8: RETURN SInt8ToInt32(MEM[addr + offset]) + _MM_UPCONV_EPI32_UINT16: RETURN UInt16ToInt32(MEM[addr + 2*offset]) + _MM_UPCONV_EPI32_SINT16: RETURN SInt16ToInt32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI32_NONE: RETURN 4 + _MM_UPCONV_EPI32_UINT8: RETURN 1 + _MM_UPCONV_EPI32_SINT8: RETURN 1 + _MM_UPCONV_EPI32_UINT16: RETURN 2 + _MM_UPCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + IF k[j] + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (addr + loadOffset*upSize % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (addr + loadOffset*upSize % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_PS_FLOAT16: RETURN Float16ToFloat32(MEM[addr + 4*offset]) + _MM_UPCONV_PS_UINT8: RETURN UInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_SINT8: RETURN SInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_UINT16: RETURN UInt16ToFloat32(MEM[addr + 2*offset]) + _MM_UPCONV_PS_SINT16: RETURN SInt16ToFloat32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_PS_FLOAT16: RETURN Float16ToFloat32(MEM[addr + 4*offset]) + _MM_UPCONV_PS_UINT8: RETURN UInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_SINT8: RETURN SInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_UINT16: RETURN UInt16ToFloat32(MEM[addr + 2*offset]) + _MM_UPCONV_PS_SINT16: RETURN SInt16ToFloat32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_PS_FLOAT16: RETURN Float16ToFloat32(MEM[addr + 4*offset]) + _MM_UPCONV_PS_UINT8: RETURN UInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_SINT8: RETURN SInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_UINT16: RETURN UInt16ToFloat32(MEM[addr + 2*offset]) + _MM_UPCONV_PS_SINT16: RETURN SInt16ToFloat32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = MEM[mt] +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN MEM[addr + 4*offset] + _MM_UPCONV_PS_FLOAT16: RETURN Float16ToFloat32(MEM[addr + 4*offset]) + _MM_UPCONV_PS_UINT8: RETURN UInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_SINT8: RETURN SInt8ToFloat32(MEM[addr + offset]) + _MM_UPCONV_PS_UINT16: RETURN UInt16ToFloat32(MEM[addr + 2*offset]) + _MM_UPCONV_PS_SINT16: RETURN SInt16ToFloat32(MEM[addr + 2*offset]) + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = MEM[mt] +FOR j := 0 to 15 + IF k[j] + i := j*32 + dst[i+31:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize) % 64 == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +upSize := UPCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*upSize) % 64 == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elemenst are skipped when the corresponding mask bit is not set). + +UPCONVERT(address, offset, convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN MEM[addr + 8*offset] + ESAC +} + +UPCONVERTSIZE(convertTo) { + CASE conv OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +dst[511:0] := src[511:0] +loadOffset := 0 +upSize := UPCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + dst[i+63:i] := UPCONVERT(addr, loadOffset, conv) + loadOffset := loadOffset + 1 + IF (mt + loadOffset * upSize) % 64 == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN element[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN 4 + _MM_DOWNCONV_EPI32_UINT8: RETURN 1 + _MM_DOWNCONV_EPI32_SINT8: RETURN 1 + _MM_DOWNCONV_EPI32_UINT16: RETURN 2 + _MM_DOWNCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresonding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN element[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN 4 + _MM_DOWNCONV_EPI32_UINT8: RETURN 1 + _MM_DOWNCONV_EPI32_SINT8: RETURN 1 + _MM_DOWNCONV_EPI32_UINT16: RETURN 2 + _MM_DOWNCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN element[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN 4 + _MM_DOWNCONV_EPI32_UINT8: RETURN 1 + _MM_DOWNCONV_EPI32_SINT8: RETURN 1 + _MM_DOWNCONV_EPI32_UINT16: RETURN 2 + _MM_DOWNCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are written to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN element[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_DOWNCONV_EPI32_NONE: RETURN 4 + _MM_DOWNCONV_EPI32_UINT8: RETURN 1 + _MM_DOWNCONV_EPI32_SINT8: RETURN 1 + _MM_DOWNCONV_EPI32_UINT16: RETURN 2 + _MM_DOWNCONV_EPI32_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + IF k[j] + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_EPI64_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresonding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_EPI64_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE convertTo OF + _MM_UPCONV_EPI64_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE convertTo OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE convertTo OF + _MM_UPCONV_EPI64_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE convertTo OF + _MM_UPCONV_EPI64_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN element[i+31:i] + _MM_UPCONV_PS_FLOAT16: RETURN Float32ToFloat16(element[i+31:i]) + _MM_UPCONV_PS_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_UPCONV_PS_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_UPCONV_PS_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_UPCONV_PS_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN element[i+31:i] + _MM_UPCONV_PS_FLOAT16: RETURN Float32ToFloat16(element[i+31:i]) + _MM_UPCONV_PS_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_UPCONV_PS_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_UPCONV_PS_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_UPCONV_PS_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN element[i+31:i] + _MM_UPCONV_PS_FLOAT16: RETURN Float32ToFloat16(element[i+31:i]) + _MM_UPCONV_PS_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_UPCONV_PS_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_UPCONV_PS_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_UPCONV_PS_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN element[i+31:i] + _MM_UPCONV_PS_FLOAT16: RETURN Float32ToFloat16(element[i+31:i]) + _MM_UPCONV_PS_UINT8: RETURN UInt32ToUInt8(element[i+31:i]) + _MM_UPCONV_PS_SINT8: RETURN SInt32ToSInt8(element[i+31:i]) + _MM_UPCONV_PS_UINT16: RETURN UInt32ToUInt16(element[i+31:i]) + _MM_UPCONV_PS_SINT16: RETURN SInt32ToSInt16(element[i+31:i]) + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PS_NONE: RETURN 4 + _MM_UPCONV_PS_FLOAT16: RETURN 2 + _MM_UPCONV_PS_UINT8: RETURN 1 + _MM_UPCONV_PS_SINT8: RETURN 1 + _MM_UPCONV_PS_UINT16: RETURN 2 + _MM_UPCONV_PS_SINT16: RETURN 2 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 15 + IF k[j] + i := j*32 + tmp := DOWNCONVERT(v1[i+31:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 4: MEM[storeAddr] := tmp[31:0] + 2: MEM[storeAddr] := tmp[15:0] + 1: MEM[storeAddr] := tmp[7:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PD_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE converTo OF + _MM_UPCONV_PD_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE converTo OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +foundNext64BytesBoundary := false +downSize := DOWNCONVERTSIZE(conv) +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*downSize) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. + +DOWNCONVERT(element, convertTo) { + CASE convertTo OF + _MM_UPCONV_PD_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE convertTo OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +DOWNCONVERT(element, convertTo) { + CASE convertTo OF + _MM_UPCONV_PD_NONE: RETURN element[i+63:i] + ESAC +} + +DOWNCONVERTSIZE(convertTo) { + CASE convertTo OF + _MM_UPCONV_PD_NONE: RETURN 8 + ESAC +} + +storeOffset := 0 +downSize := DOWNCONVERTSIZE(conv) +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + tmp := DOWNCONVERT(v1[i+63:i], conv) + storeAddr := addr + storeOffset * downSize + CASE downSize OF + 8: MEM[storeAddr] := tmp[63:0] + ESAC + storeOffset := storeOffset + 1 + IF ((addr + storeOffset * downSize) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Convert + + Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst". The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0. + +FOR j := 0 to 7 + i := j*64 + k := j*32 + dst[k+31:k] := Float64ToFloat32(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Convert + + + + Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0. + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Float64ToFloat32(v2[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + Loads 8 64-bit integer elements from memory starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" and stores them in "dst". + +FOR j := 0 to 7 + i := j*64 + addr := MEM[mv + index[j] * scale] + dst[i+63:i] := addr[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Load + + + + + + Loads 8 64-bit integer elements from memory starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + addr := MEM[mv + index[j] * scale] + dst[i+63:i] := addr[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + Loads 8 double-precision (64-bit) floating-point elements stored at memory locations starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" them in "dst". + +FOR j := 0 to 7 + i := j*64 + addr := MEM[mv + index[j] * scale] + dst[i+63:i] := addr[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Load + + + + + + Loads 8 double-precision (64-bit) floating-point elements from memory starting at location "mv" at packed 32-bit integer indices stored in the lower half of "index" scaled by "scale" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + addr := MEM[mv + index[j] * scale] + dst[i+63:i] := addr[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Load + + + + + Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +cachev := 0 +FOR j := 0 to 15 + i := j*32 + addr := MEM[mv + index[j] * scale] + cachev[i+31:i] := addr[i+63:i] +ENDFOR + + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + Stores 8 packed double-precision (64-bit) floating-point elements in "v1" to memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + addr[i+63:i] := v1[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512F/KNCNI + Store + + + + + + Stores 8 packed double-precision (64-bit) floating-point elements in "v1" to memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory. + +FOR j := 0 to 7 + IF k[j] + addr := MEM[mv + index[j] * scale] + i := j*64 + addr[i+63:i] := v1[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Stores 8 packed 64-bit integer elements located in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + addr[i+63:i] := v1[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Stores 8 packed 64-bit integer elements located in "v1" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale" using writemask "k" (elements whose corresponding mask bit is not set are not written to memory). + +FOR j := 0 to 7 + IF k[j] + addr := MEM[mv + index[j] * scale] + i := j*64 + addr[i+63:i] := v1[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Store + + + + + Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. + +FOR j := 0 to 15 + i := j*32 + addr := MEM[mv + index[j] * scale] + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + _MM_HINT_T2: PrefetchL2WithT1HintNonTemporal(addr[i+31:i]) + _MM_HINT_NTA: PrefetchL1WithT0HintNonTemporal(addr[i+31:i]) + ESAC +ENDFOR + + + +
immintrin.h
+
+ + Floating Point + AVX512PF/KNCNI + Store + + + + + + Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. Only those elements whose corresponding mask bit in "k" is set are loaded into the desired cache. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + CASE hint OF + _MM_HINT_T0: PrefetchL1WithT0Hint(addr[i+31:i]) + _MM_HINT_T1: PrefetchL2WithT1Hint(addr[i+31:i]) + _MM_HINT_T2: PrefetchL2WithT1HintNonTemporal(addr[i+31:i]) + _MM_HINT_NTA: PrefetchL1WithT0HintNonTemporal(addr[i+31:i]) + FI +ENDFOR + + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*4 % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*4 % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + loadOffset := loadOffset + 1 + IF (mt + loadOffset * 4) % 64 == 0 + break + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + loadOffset := loadOffset + 1 + IF (mt + loadOffset * 4) % 64 == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF ((addr + (loadOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (loadOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + loadOffset := loadOffset + 1 + IF (addr + loadOffset*8 % 64) == 0 + break + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + loadOffset := loadOffset + 1 + IF (addr + loadOffset*8 % 64) == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*4 % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + Loads the high-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt-64 and expands them into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*4 % 64) == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + Loads the low-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + loadOffset := loadOffset + 1 + IF (mt + loadOffset * 4) % 64 == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + Loads the low-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := MEM[addr + loadOffset*4] + dst[i+31:i] := tmp[i+31:i] + loadOffset := loadOffset + 1 + IF (mt + loadOffset * 4) % 64 == 0 + break + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*8) % 64 == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + FI + loadOffset := loadOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF (addr + (loadOffset + 1)*8) % 64 == 0 + foundNext64BytesBoundary := true + FI + ELSE + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + FI + loadOffset := loadOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + loadOffset := loadOffset + 1 + IF ((addr + 8*loadOffset) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +dst[511:0] := src[511:0] +loadOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := MEM[addr + loadOffset*8] + dst[i+63:i] := tmp[i+63:i] + loadOffset := loadOffset + 1 + IF ((addr + 8*loadOffset) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (m5-64)). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*4) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*4) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). + +storeOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*4) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +addr = mt +FOR j := 0 to 15 + IF k[j] + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*4) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). + +storeOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*8) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*8) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*4) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 15 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*4) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). + +storeOffset := 0 +addr = mt +FOR j := 0 to 15 + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*4) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +addr = mt +FOR j := 0 to 15 + IF k[j] + i := j*32 + MEM[addr + storeOffset*4] := v1[i+31:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*4) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + MEM[addr + storeOffset*4] := v1[i+63:i] + FI + storeOffset := storeOffset + 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +foundNext64BytesBoundary := false +addr = mt-64 +FOR j := 0 to 7 + IF k[j] + IF foundNext64BytesBoundary == false + IF ((addr + (storeOffset + 1)*8) % 64) == 0 + foundNext64BytesBoundary = true + FI + ELSE + i := j*64 + MEM[addr + storeOffset*4] := v1[i+63:i] + FI + storeOffset := storeOffset + 1 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). + +storeOffset := 0 +addr = mt +FOR j := 0 to 7 + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*8) % 64) == 0 + BREAK + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set). + +storeOffset := 0 +addr = mt +FOR j := 0 to 7 + IF k[j] + i := j*64 + MEM[addr + storeOffset*8] := v1[i+63:i] + storeOffset := storeOffset + 1 + IF ((addr + storeOffset*8) % 64) == 0 + BREAK + FI + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + KNCNI + Bit Manipulation + + Counts the number of set bits in 32-bit unsigned integer "r1", returning the results in "dst". + +dst[31:0] := PopCount(r1[31:0]) + + +
immintrin.h
+
+ + KNCNI + Bit Manipulation + + Counts the number of set bits in 64-bit unsigned integer "r1", returning the results in "dst". + +dst[63:0] := PopCount(r1[63:0]) + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Inserts the low byte of mask "k2" into the high byte of "dst", and copies the low byte of "k1" to the low byte of "dst". + +dst[7:0] := k1[7:0] +dst[15:8] := k2[7:0] + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + Performs an element-by-element conversion of elements in packed double-precision (64-bit) floating-point vector "v2" to 32-bit integer elements, storing them in the lower half of "dst". The elements in the upper half of "dst" are set to 0. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + k := j*32 + dst[k+31:k] := Float64ToInt32(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + + Performs an element-by-element conversion of elements in packed double-precision (64-bit) floating-point vector "v2" to 32-bit integer elements, storing them in the lower half of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The elements in the upper half of "dst" are set to 0. + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Float64ToInt32(v2[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + KNCNI + Convert + + + + Performs element-by-element conversion of packed 32-bit integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := Int32ToFloat32(v2[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + + Floating Point + AVX512F/KNCNI + Arithmetic + + Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ABS(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(v2[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + + Floating Point + AVX512F/KNCNI + Arithmetic + + Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ABS(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + + Floating Point + AVX512F/KNCNI + Arithmetic + + + + Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(v2[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a" with absolute error of 2^(-23) and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := Log2ae23(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a" with absolute error of 2^(-23) and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Log2ae23(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + Multiply packed 32-bit integer elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Multiply packed 32-bit integer elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FpMax(Abs(a[i+31:i]), Abs(b[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FpMax(Abs(a[i+31:i]), Abs(b[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum of each pair of corresponding elements in packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FpMax(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the maximum of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FpMax(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FpMax(Abs(a[i+31:i]), Abs(b[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FpMax(Abs(a[i+31:i]), Abs(b[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum of each pair of corresponding elements in packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FpMax(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the maximum of each pair of corresponding elements of packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FpMax(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the minimum of each pair of corresponding elements in packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FpMin(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the minimum of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FpMin(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the minimum of each pair of corresponding elements in packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FpMin(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + + + Determines the minimum of each pair of corresponding elements of packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FpMin(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + Performs element-by-element multiplication between packed 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) >> 32 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element multiplication between packed 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) >> 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + Performs element-by-element multiplication between packed unsigned 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) >> 32 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Arithmetic + + + + + Performs element-by-element multiplication between packed unsigned 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) >> 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + Permutes 128-bit blocks of the packed 32-bit integer vector "a" using constant "imm8". The results are stored in "dst". + +SELECT4(src, control) { + CASE control[1:0] OF + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +FOR j := 0 to 3 + i := j*128 + n := j*2 + dst[i+127:i] := SELECT4(a[511:0], imm8[n+1:n]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Swizzle + + + + + Permutes 128-bit blocks of the packed 32-bit integer vector "a" using constant "imm8". The results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control) { + CASE control[1:0] OF + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp[511:0] := 0 +FOR j := 0 to 3 + i := j*128 + n := j*2 + tmp[i+127:i] := SELECT4(a[511:0], imm8[n+1:n]) +ENDFOR +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + Approximates the reciprocals of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of precision, storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + + + Approximates the reciprocals of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of precision, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Convert + + + + Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value using "expadj" and in the direction of "rounding", and store the results as packed single-precision floating-point elements in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Convert + + + + + + Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value using "expadj" and in the direction of "rounding", and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ROUND(a[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + Performs element-by-element rounding of packed single-precision (32-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + + + Performs element-by-element rounding of packed single-precision (32-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ROUND(a[i+31:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + Performs element-by-element rounding of packed double-precision (64-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Miscellaneous + + + + + + Performs element-by-element rounding of packed double-precision (64-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ROUND(a[i+63:i]) + CASE expadj OF + _MM_EXPADJ_NONE: dst[i+31:i] = dst[i+31:i] * 2**0 + _MM_EXPADJ_4: dst[i+31:i] = dst[i+31:i] * 2**4 + _MM_EXPADJ_5: dst[i+31:i] = dst[i+31:i] * 2**5 + _MM_EXPADJ_8: dst[i+31:i] = dst[i+31:i] * 2**8 + _MM_EXPADJ_16: dst[i+31:i] = dst[i+31:i] * 2**16 + _MM_EXPADJ_24: dst[i+31:i] = dst[i+31:i] * 2**24 + _MM_EXPADJ_31: dst[i+31:i] = dst[i+31:i] * 2**31 + _MM_EXPADJ_32: dst[i+31:i] = dst[i+31:i] * 2**32 + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + Calculates the reciprocal square root of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of accuracy and stores the result in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := Sqrt(1.0 / a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + + + Calculates the reciprocal square root of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of accuracy and stores the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Sqrt(1.0 / a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * Pow(2, b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * Pow(2, b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst". Intermediate elements are rounded using "rounding". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * Pow(2, b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + + Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exp, where the exp is the corresponding 32-bit integer element in "b", storing results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are rounded using constant "rounding". + [round_note] + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * Pow(2, b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ACOS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ACOS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ACOS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ACOS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ACOSH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ACOSH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ACOSH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ACOSH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ASIN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ASIN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ASIN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ASIN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ASINH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ASINH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ASINH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ASINH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ATAN(a[i+63:i] / b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ATAN(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ATAN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ATAN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ATANH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ATANH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ATANH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ATANH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CubeRoot(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CubeRoot(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CDFNormal(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CDFNormal(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CEIL(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CEIL(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COSD(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COSD(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COSD(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COSD(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COSH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COSH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COSH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COSH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Probability/Statistics + + + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 10^(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 10^(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 10^(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 10^(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 2^(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 2^(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 2^(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 2^(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := e^(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := e^(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := e^(a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := e^(a[i+63:i]) - 1.0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := e^(a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := e^(a[i+31:i]) - 1.0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FLOOR(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FLOOR(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]^2 + b[i+63:i]^2) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]^2 + b[i+63:i]^2) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]^2 + b[i+31:i]^2) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]^2 + b[i+31:i]^2) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := InvSQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := InvSQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := InvSQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := InvSQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst". + +FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst". + +FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := log10(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := log10(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := log10(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := log10(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ln(1.0 + a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ln(1.0 + a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ln(1.0 + a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ln(1.0 + a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := log2(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := log2(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Elementary Math Functions + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := log2(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+ +
+ + Floating Point + KNCNI + Elementary Math Functions + + + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := log2(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ln(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ln(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ln(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ln(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := NearbyInt(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := NearbyInt(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := NearbyInt(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := NearbyInt(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i])^(b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i])^(b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i])^(b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i])^(b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (1 / a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1 / a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (1 / a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Elementary Math Functions + + + + Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1 / a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundToNearestEven(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundToNearestEven(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundToNearestEven(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundToNearestEven(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ROUND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SINH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SINH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SINH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SINH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIND(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIND(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TAN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TAN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TAN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TAN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TAND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TAND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TAND(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TAND(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TANH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TANH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TANH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TANH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TRUNCATE(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TRUNCATE(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TRUNCATE(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Special Math Functions + + + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TRUNCATE(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := TRUNCATE(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := TRUNCATE(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := TRUNCATE(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst". + +FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst". + +FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + Determines the minimum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst". + +min = a[31:0] +FOR j := 1 to 15 + i := j*32 + min = FpMin(min, a[i+31:i]) +ENDFOR +dst := min + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the minimum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst" using writemask "k" (elements are ignored when the corresponding mask bit is not set). + +min = a[31:0] +FOR j := 1 to 15 + i := j*32 + IF k[j] + min = FpMin(min, a[i+31:i]) + ELSE + CONTINUE + FI +ENDFOR +dst := min + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + Determines the minimum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". + +min = a[63:0] +FOR j := 1 to 7 + i := j*64 + min = FpMin(min, a[i+63:i]) +ENDFOR +dst := min + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the minimum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set). + +min = a[63:0] +FOR j := 1 to 7 + i := j*64 + IF k[j] + min = FpMin(min, a[i+63:i]) + ELSE + CONTINUE + FI +ENDFOR +dst := min + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + Determines the maximum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst". + +max = a[31:0] +FOR j := 1 to 15 + i := j*32 + max = FpMax(max, a[i+31:i]) +ENDFOR +dst := max + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set). + +max = a[31:0] +FOR j := 1 to 15 + i := j*32 + IF k[j] + CONTINUE + ELSE + dst = FpMax(max, a[i+31:i]) + FI +ENDFOR +dst := max + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + Determines the maximum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". + +max = a[63:0] +FOR j := 1 to 7 + i := j*64 + dst = FpMax(max, a[i+63:i]) +ENDFOR +dst := max + +
immintrin.h
+
+ + Floating Point + KNCNI + Special Math Functions + + + Determines the maximum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set). + +max = a[63:0] +FOR j := 1 to 7 + i := j*64 + IF k[j] + CONTINUE + ELSE + dst = FpMax(max, a[i+63:i]) + FI +ENDFOR +dst := max + +
immintrin.h
+
+ + KNCNI + Bit Manipulation + + + Counts the number of trailing bits in unsigned 32-bit integer "x" starting at bit "a" storing the result in "dst". + +count := 0 +FOR j := a to 31 + IF NOT(x[j] 1) + count := count + 1 + FI +ENDFOR +dst := count + + +
immintrin.h
+
+ + KNCNI + Bit Manipulation + + + Counts the number of trailing bits in unsigned 64-bit integer "x" starting at bit "a" storing the result in "dst". + +count := 0 +FOR j := a to 63 + IF NOT(x[j] 1) + count := count + 1 + FI +ENDFOR +dst := count + + +
immintrin.h
+
+ + KNCNI + General Support + + Stalls a thread without blocking other threads for 32-bit unsigned integer "r1" clock cycles. + +BlockThread(r1) + + +
immintrin.h
+
+ + KNCNI + General Support + + Stalls a thread without blocking other threads for 64-bit unsigned integer "r1" clock cycles. + +BlockThread(r1) + + +
immintrin.h
+
+ + KNCNI + General Support + + Set performance monitoring filtering mask to 32-bit unsigned integer "r1". + +SetPerfMonMask(r1[31:0]) + + +
immintrin.h
+
+ + KNCNI + General Support + + Set performance monitoring filtering mask to 64-bit unsigned integer "r1". + +SetPerfMonMask(r1[63:0]) + + +
immintrin.h
+
+ + KNCNI + General Support + + + Evicts the cache line containing the address "ptr" from cache level "level" (can be either 0 or 1). + +CacheLineEvict(ptr, level) + + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Performs a bitwise AND operation between NOT of "k2" and "k1", storing the result in "dst". + +dst[15:0] := NOT(k2[15:0]) & k1[15:0] + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Moves high byte from "k2" to low byte of "k1", and moves low byte of "k2" to high byte of "k1". + +tmp[7:0] := k2[15:8] +k2[15:8] := k1[7:0] +k1[7:0] := tmp[7:0] + +tmp[7:0] := k2[7:0] +k2[7:0] := k1[15:8] +k1[15:8] := tmp[7:0] + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag is set if "dst" is 0. + +dst[15:0] := k1[15:0] | k2[15:0] +IF dst = 0 + SetZF() +FI + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag is set if "dst" consists of all 1's. + +dst[15:0] := k1[15:0] | k2[15:0] +IF PopCount(dst[15:0]) = 16 + SetCF() +FI + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag is set if "dst" is 0. + +dst[15:0] := k1[15:0] | k2[15:0] +IF dst = 0 + SetZF() +FI + + +
immintrin.h
+
+ + Mask + AVX512F + Mask + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag is set if "dst" consists of all 1's. + +dst[15:0] := k1[15:0] | k2[15:0] +IF PopCount(dst[15:0]) = 16 + SetCF() +FI + + +
immintrin.h
+
+ + KNCNI + Mask + + Converts bit mask "k1" into an integer value, storing the results in "dst". + +dst := ZeroExtend(k1) + +
immintrin.h
+ +
+ + AVX512F + Mask + + Converts bit mask "k1" into an integer value, storing the results in "dst". + +dst := ZeroExtend(k1) + +
immintrin.h
+ +
+ + KNCNI + Mask + + Converts integer "mask" into bitmask, storing the result in "dst". + +dst := mask[15:0] + +
immintrin.h
+ +
+ + AVX512F + Mask + + Converts integer "mask" into bitmask, storing the result in "dst". + +dst := mask[15:0] + +
immintrin.h
+ +
+ + Mask + KNCNI + Mask + + + Packs masks "k1" and "k2" into the high 32 bits of "dst". The rest of "dst" is set to 0. + +dst[63:48] := k1[15:0] +dst[47:32] := k2[15:0] +dst[31:0] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Packs masks "k1" and "k2" into the low 32 bits of "dst". The rest of "dst" is set to 0. + +dst[31:16] := k1[15:0] +dst[15:0] := k2[15:0] +dst[63:32] := 0 + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Extracts 16-bit value "b" from 64-bit integer "a", storing the result in "dst". + +CASE b of +0: dst[15:0] := a[63:48] +1: dst[15:0] := a[47:32] +2: dst[15:0] := a[31:16] +3: dst[15:0] := a[15:0] +ESAC +dst[MAX:15] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + base := (j & ~0x3) * 32 + scale[31:0] := b[base+63:base+32] + bias[31:0] := b[base+31:base] + dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Up-converts 8 single-precision (32-bit) memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst". "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_UPCONV_EPI32_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + + + Up-converts 8 single-precision (32-bit) memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_UPCONV_EPI32_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_EPI32_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToInt32(addr[n+7:n]) + _MM_UPCONV_EPI32_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToInt32(addr[n+15:n]) + _MM_UPCONV_EPI32_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToInt32(addr[n+15:n]) + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Up-converts 8 double-precision (64-bit) memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst". "hint" indicates to the processor whether the load is non-temporal. + +FOR j := 0 to 7 + i := j*64 + addr := MEM[mv + index[j] * scale] + CASE conv OF + _MM_UPCONV_EPI64_NONE: dst[i+63:i] := addr[i+63:i] + ESAC +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + + + Up-converts 8 double-precision (64-bit) memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the load is non-temporal. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + addr := MEM[mv + index[j] * scale] + CASE conv OF + _MM_UPCONV_EPI64_NONE: dst[i+63:i] := addr[i+63:i] + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Up-converts 8 memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in the lower half of "dst". "hint" indicates to the processor whether the load is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_UPCONV_PS_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + + + Up-converts 8 memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in the lower half of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the load is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_UPCONV_PS_NONE: + dst[i+31:i] := addr[i+31:i] + _MM_UPCONV_PS_FLOAT16: + n := j*16 + dst[i+31:i] := Float16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_UINT8: + n := j*8 + dst[i+31:i] := UInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_SINT8: + n := j*8 + dst[i+31:i] := SInt8ToFloat32(addr[n+7:n]) + _MM_UPCONV_PS_UINT16: + n := j*16 + dst[i+31:i] := UInt16ToFloat32(addr[n+15:n]) + _MM_UPCONV_PS_SINT16: + n := j*16 + dst[i+31:i] := SInt16ToFloat32(addr[n+15:n]) + ESAC + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Up-converts 8 double-precision (64-bit) floating-point elements stored in memory starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst". "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_UPCONV_PD_NONE: dst[i+63:i] := addr[i+63:i] + ESAC +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + + + Up-converts 8 double-precision (64-bit) floating-point elements stored in memory starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + IF k[j] + CASE conv OF + _MM_UPCONV_PD_NONE: dst[i+63:i] := addr[i+63:i] + ESAC + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + + + Down-converts 16 packed 32-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal. AVX512 supports _MM_DOWNCONV_EPI32_NONE. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v1[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := UInt32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := SInt32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := UInt32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := SInt32ToSInt16(v1[n+15:n]) + ESAC +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512F/KNCNI + Store + + + + + + + + Down-converts 16 packed 32-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 32-bit integer indices stored in "index" scaled by "scale". Elements are written using writemask "k" (elements are only written when the corresponding mask bit is set; otherwise, elements are left unchanged in memory). "hint" indicates to the processor whether the data is non-temporal. AVX512 supports _MM_DOWNCONV_EPI32_NONE. + +FOR j := 0 to 15 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v1[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := UInt32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := SInt32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := UInt32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := SInt32ToSInt16(v1[n+15:n]) + ESAC + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + + Down-converts 8 packed single-precision (32-bit) floating-point elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + CASE conv OF + _MM_DOWNCONV_PS_NONE: + addr[i+31:i] := v1[i+31:i] + _MM_DOWNCONV_PS_FLOAT16: + n := j*16 + addr[n+15:n] := Float32ToFloat16(v1[i+31:i]) + _MM_DOWNCONV_PS_UINT8: + n := j*8 + addr[n+7:n] := Float32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_PS_SINT8: + n := j*8 + addr[n+7:n] := Float32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_PS_UINT16: + n := j*16 + addr[n+15:n] := Float32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_PS_SINT16: + n := j*16 + addr[n+15:n] := Float32ToSInt16(v1[i+31:i]) + ESAC +ENDFOR + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + + + Down-converts 8 packed single-precision (32-bit) floating-point elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". Elements are only written when the corresponding mask bit is set in "k"; otherwise, elements are unchanged in memory. "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + IF k[j] + CASE conv OF + _MM_DOWNCONV_PS_NONE: + addr[i+31:i] := v[i+31:i] + _MM_DOWNCONV_PS_FLOAT16: + n := j*16 + addr[n+15:n] := Float32ToFloat16(v1[i+31:i]) + _MM_DOWNCONV_PS_UINT8: + n := j*8 + addr[n+7:n] := Float32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_PS_SINT8: + n := j*8 + addr[n+7:n] := Float32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_PS_UINT16: + n := j*16 + addr[n+15:n] := Float32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_PS_SINT16: + n := j*16 + addr[n+15:n] := Float32ToSInt16(v1[i+31:i]) + ESAC + FI +ENDFOR + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + + Down-converts 8 packed double-precision (64-bit) floating-point elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: + addr[i+63:i] := v1[i+63:i] + ESAC +ENDFOR + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + + + Down-converts 8 packed double-precision (64-bit) floating-point elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". Elements are written to memory using writemask "k" (elements are not stored to memory when the corresponding mask bit is not set; the memory location is left unchagned). "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + IF k[j] + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: + addr[i+63:i] := v1[i+63:i] + ESAC + FI +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + + Down-converts the low 8 packed 32-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v1[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := UInt32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := SInt32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := UInt32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := SInt32ToSInt16(v1[n+15:n]) + ESAC +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + + + Down-converts the low 8 packed 32-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". Elements are written to memory using writemask "k" (elements are only written when the corresponding mask bit is set; otherwise, the memory location is left unchanged). "hint" indicates to the processor whether the data is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + IF k[j] + CASE conv OF + _MM_DOWNCONV_EPI32_NONE: + addr[i+31:i] := v1[i+31:i] + _MM_DOWNCONV_EPI32_UINT8: + n := j*8 + addr[n+7:n] := UInt32ToUInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT8: + n := j*8 + addr[n+7:n] := SInt32ToSInt8(v1[i+31:i]) + _MM_DOWNCONV_EPI32_UINT16: + n := j*16 + addr[n+15:n] := UInt32ToUInt16(v1[i+31:i]) + _MM_DOWNCONV_EPI32_SINT16: + n := j*16 + addr[n+15:n] := SInt32ToSInt16(v1[n+15:n]) + ESAC + FI +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + + Down-converts 8 packed 64-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". "hint" indicates to the processor whether the load is non-temporal. + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v1[i+63:i] + ESAC +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + + + Down-converts 8 packed 64-bit integer elements in "v1" using "conv" and stores them in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory. + +FOR j := 0 to 7 + IF k[j] + addr := MEM[mv + index[j] * scale] + i := j*64 + CASE conv OF + _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v1[i+63:i] + ESAC + FI +ENDFOR + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + AVX512F + Store + + + + + Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + Permutes 128-bit blocks of the packed single-precision (32-bit) floating-point elements in "a" using constant "imm8". The results are stored in "dst". + +SELECT4(src, control) { + CASE control[1:0] OF + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +FOR j := 0 to 3 + i := j*128 + n := j*2 + dst[i+127:i] := SELECT4(a[511:0], imm8[n+1:n]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + KNCNI + Swizzle + + + + + Permutes 128-bit blocks of the packed single-precision (32-bit) floating-point elements in "a" using constant "imm8". The results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control) { + CASE control[1:0] OF + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + 2: tmp[127:0] := src[383:256] + 3: tmp[127:0] := src[511:384] + ESAC + RETURN tmp[127:0] +} + +tmp[511:0] := 0 +FOR j := 0 to 4 + i := j*128 + n := j*2 + tmp[i+127:i] := SELECT4(a[511:0], imm8[n+1:n]) +ENDFOR +FOR j := 0 to 15 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + Computes the sine and cosine of the packed double-precision (64-bit) floating-point elements in "a" and stores the results of the sine computation in "dst" and the results of the cosine computation in "cos_res". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) + cos_res[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + + + Computes the sine and cosine of the packed double-precision (64-bit) floating-point elements in "a" and stores the results of the sine computation in "dst" and the results of the cosine computation in "cos_res". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIN(a[i+63:i]) + cos_res[i+63:i] := COS(a[i+63:i]) + ELSE + dst[i+63:i] := sin_src[i+63:i] + cos_res[i+63:i] := cos_src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + Computes the sine and cosine of the packed single-precision (32-bit) floating-point elements in "a" and stores the results of the sine computation in "dst" and the results of the cosine computation in "cos_res". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) + cos_res[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + +
immintrin.h
+
+ + Floating Point + AVX512F + Trigonometry + + + + + + Computes the sine and cosine of the packed single-precision (32-bit) floating-point elements in "a" and stores the results of the sine computation in "dst" and the results of the cosine computation in "cos_res". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIN(a[i+31:i]) + cos_res[i+31:i] := COS(a[i+31:i]) + ELSE + dst[i+31:i] := sin_src[i+31:i] + cos_res[i+31:i] := cos_src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + Loads 8 32-bit integer memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" to "dst". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + dst[i+31:i] := addr[i+31:i] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Integer + KNCNI + Load + + + + + + Loads 8 32-bit integer memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + dst[i+31:i] := addr[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + Loads 8 single-precision (32-bit) floating-point memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" to "dst". + +FOR j := 0 to 7 + addr := MEM[mv + index[j] * scale] + i := j*32 + dst[i+31:i] := addr[i+31:i] +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Load + + + + + + Loads 8 single-precision (32-bit) floating-point memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + dst[i+31:i] := addr[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + Stores 8 packed single-precision (32-bit) floating-point elements in "v" in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". + +FOR j := 0 to 7 + i := j*32 + addr := MEM[mv + index[j] * scale] + addr[i+31:i] := v[i+31:i] +ENDFOR + +
immintrin.h
+
+ + Floating Point + KNCNI + Store + + + + + + Stores 8 packed single-precision (32-bit) floating-point elements in "v1" in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using writemask "k" (elements are only written to memory when the corresponding mask bit is set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + addr[i+31:i] := v1[i+31:i] + FI +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + Stores 8 packed 32-bit integer elements in "v1" in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale". + +FOR j := 0 to 7 + i := j*32 + addr := MEM[mv + index[j] * scale] + addr[i+31:i] := v1[i+31:i] +ENDFOR + +
immintrin.h
+
+ + Integer + KNCNI + Store + + + + + + Stores 8 packed 32-bit integer elements in "v1" in memory locations starting at location "mv" at packed 64-bit integer indices stored in "index" scaled by "scale" using writemask "k" (elements are only written to memory when the corresponding mask bit is set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + addr := MEM[mv + index[j] * scale] + addr[i+31:i] := v1[i+31:i] + FI +ENDFOR + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Move the high element from "k1" to the low element of "k1", and insert the low element of "k2" into the high element of "k1". + +tmp[7:0] := k1[15:8] +k1[15:8] := k2[7:0] +k1[7:0] := tmp[7:0] + + +
immintrin.h
+
+ + Mask + KNCNI + Mask + + + Insert the low element of "k2" into the high element of "k1". + +k1[15:8] := k2[7:0] + + +
immintrin.h
+
+ + Floating Point + AVX512 + Convert + + Copy the lower single-precision (32-bit) floating-point element of "a" to "dst". + dst[31:0] := a[31:0] + +
immintrin.h
+
+ + Floating Point + AVX512 + Convert + + Copy the lower double-precision (64-bit) floating-point element of "a" to "dst". + dst[63:0] := a[63:0] + +
immintrin.h
+
+ + Integer + AVX512 + Convert + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation, and store the results in "dst". + +FOR j := 0 to 15 + FOR m := 0 to 3 + lim_base := m*32 + i := j*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := src[i+31:i] + lword + uword + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).. + +FOR j := 0 to 15 + IF mask[j] + FOR m := 0 to 3 + lim_base := m*32 + i := j*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := src[i+31:i] + lword + uword + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF mask[j] + FOR m := 0 to 3 + lim_base := m*32 + i := j*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := src[i+31:i] + lword + uword + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation and signed saturation, and store the results in "dst". + +FOR j := 0 to 15 + FOR m := 0 to 3 + lim_base := m*32 + i := j*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := SIGNED_DWORD_SATURATE(src[i+31:i] + lword + uword) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask and signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).. + +FOR j := 0 to 15 + IF mask[i] + FOR m := 0 to 3 + lim_base := m*32 + i := j*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := SIGNED_DWORD_SATURATE(src[i+31:i] + lword + uword) + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512_4VNNIW + Arithmetic + + + + + + + + Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask and signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF mask[j] + FOR m := 0 to 3 + lim_base := m*32 + tl := b[lim_base+15:lim_base] + tu := b[lim_base+31:lim_base+16] + lword := a{m}[i+15:i] * tl + uword := a{m}[i+31:i+16] * tu + dst[i+31:i] := SIGNED_DWORD_SATURATE(src[i+31:i] + lword + uword) + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating with the corresponding elements in "a". Store the results in "dst". + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] + b{m}[i+31:i] * c[n+31:n]) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating with the corresponding elements in "a". Store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + IF mask[j] + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] + b{m}[i+31:i] * c[n+31:n]) + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating with the corresponding elements in "a". Store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + IF mask[j] + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] + b{m}[i+31:i] * c[n+31:n]) + ELSE + dst[i+31:i] := 0 + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating the negated intermediate result with the corresponding elements in "a". Store the results in "dst". + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] - b{m}[i+31:i] * c[n+31:n]) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating the negated intermediate result with the corresponding elements in "a". Store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + IF mask[j] + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] - b{m}[i+31:i] * c[n+31:n]) + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by the 4 corresponding packed elements in "c", accumulating the negated intermediate result with the corresponding elements in "a". Store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +dst := a +FOR m := 0 to 3 + FOR j := 0 to 15 + i = j*32 + n = m*32 + IF mask[j] + dst[i+31:i] := RoundFPControl_MXCSR(dst[i+31:i] - b{m}[i+31:i] * c[n+31:n]) + ELSE + dst[i+31:i] := 0 + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating with the lower element in "a". Store the result in the lower element of "dst". + +dst := a +FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] + b{j}[31:0] * c[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating with the lower element in "a". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set). + +dst := a +IF k[0] + FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] + b{j}[31:0] * c[i+31:i]) + ENDFOR +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating with the lower element in "a". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set). + +dst := a +IF k[0] + FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] + b{j}[31:0] * c[i+31:i]) + ENDFOR +ELSE + dst[31:0] := 0 +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating the negated intermediate result with the lower element in "a". Store the result in the lower element of "dst". + +dst := a +FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] - b{j}[31:0] * c[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating the negated intermediate result with the lower element in "a". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set). + +dst := a +IF k[0] + FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] - b{j}[31:0] * c[i+31:i]) + ENDFOR +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512_4FMAPS + Arithmetic + + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "b0" through "b3" by corresponding element in "c", accumulating the negated intermediate result with the lower element in "a". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set). + +dst := a +IF k[0] + FOR j := 0 to 3 + i := j*32 + dst[31:0] := RoundFPControl_MXCSR(dst[31:0] - b{j}[31:0] * c[i+31:i]) + ENDFOR +ELSE + dst[31:0] := 0 +FI +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POPCNT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + + + Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POPCNT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + + Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POPCNT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POPCNT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POPCNT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VPOPCNTDQ + Bit Manipulation + + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POPCNT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Mask + AVX512BW + Miscellaneous + + + Unpack and interleave 32 bits from masks "a" and "b", and store the 64-bit result in "k". + +k[31:0] := a[31:0] +k[63:32] := b[31:0] +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Mask + AVX512BW + Miscellaneous + + + Unpack and interleave 16 bits from masks "a" and "b", and store the 32-bit result in "k". + +k[15:0] := a[15:0] +k[31:16] := b[15:0] +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512F + AVX512VL + Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 32 bytes (8 elements) in "dst". + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*count) +dst[255:0] := temp[255:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*count) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*count) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 16 bytes (4 elements) in "dst". + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*count) +dst[127:0] := temp[127:0] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*count) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*count) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 32 bytes (4 elements) in "dst". + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*count) +dst[255:0] := temp[255:0] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*count) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*count) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 16 bytes (2 elements) in "dst". + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*count) +dst[127:0] := temp[127:0] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*count) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + AVX512VL + Miscellaneous + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "count" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*count) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + n := (j mod 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + n := (j mod 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j mod 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j mod 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j mod 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*64 + k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*32 + k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := src[m+63:m] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := src[m+63:m] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[k+63:k]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + l := j*64 + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 3 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 3 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 3 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 3 + i := 16*j + l := 32*j + IF k[j] + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UnsignedInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". Pass _MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass _MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Pass _MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". Pass _MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass _MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UnsignedInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*i + IF k[j] + dst[i+31:i] := Convert_FP32_To_IntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*i + IF k[j] + dst[i+31:i] := Convert_FP32_To_IntegerTruncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UnsignedInt32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UnsignedInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Pass __MM_FROUND_NO_EXC to "sae" to suppress all exceptions. + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UnsignedInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := ConvertUnsignedIntegerTo_FP64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertUnsignedInt64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + l := j*32 + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + Integer + AVX512VL + AVX512DQ + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := ConvertUnsignedInt64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 1 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 3 + i := j*64 + dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 1 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 3 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 1 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 3 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 3 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 7 + i := j*64 + dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 3 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 7 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +FOR j := 0 to 3 + i := j*128 + tmp[i+31:i] := select(b[i+127:i], imm8[1:0]) + tmp[i+63:i+32] := select(b[i+127:i], imm8[3:2]) + tmp[i+95:i+64] := select(b[i+127:i], imm8[5:4]) + tmp[i+127:i+96] := select(b[i+127:i], imm8[7:6]) +ENDFOR + +FOR j := 0 to 7 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +tmp[31:0] := select(b[127:0], imm8[1:0]) +tmp[63:32] := select(b[127:0], imm8[3:2]) +tmp[95:64] := select(b[127:0], imm8[5:4]) +tmp[127:96] := select(b[127:0], imm8[7:6]) + +FOR j := 0 to 1 + i := j*64 + dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +tmp[31:0] := select(b[127:0], imm8[1:0]) +tmp[63:32] := select(b[127:0], imm8[3:2]) +tmp[95:64] := select(b[127:0], imm8[5:4]) +tmp[127:96] := select(b[127:0], imm8[7:6]) + +FOR j := 0 to 1 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + + +tmp[31:0] := select(b[127:0], imm8[1:0]) +tmp[63:32] := select(b[127:0], imm8[3:2]) +tmp[95:64] := select(b[127:0], imm8[5:4]) +tmp[127:96] := select(b[127:0], imm8[7:6]) + +FOR j := 0 to 1 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) + + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) + + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) + + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) + + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + + + Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + + Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[7:0] of +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[7:0] of +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? –INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]){ + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? 
–INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1⁄2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? –INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + +enum TOKEN_TYPE { + QNAN_TOKEN := 0, + SNAN_TOKEN := 1, + ZERO_VALUE_TOKEN := 2, + ONE_VALUE_TOKEN := 3, + NEG_INF_TOKEN := 4, + POS_INF_TOKEN := 5, + NEG_VALUE_TOKEN := 6, + POS_VALUE_TOKEN := 7 +} +FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]){ + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0] of TOKEN_TYPE) + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) of + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? 
–INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1⁄2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0] of TOKEN_TYPE) + ZERO_VALUE_TOKEN: if imm8[0] then set #ZE + ZERO_VALUE_TOKEN: if imm8[1] then set #IE + ONE_VALUE_TOKEN: if imm8[2] then set #ZE + ONE_VALUE_TOKEN: if imm8[3] then set #IE + SNAN_TOKEN: if imm8[4] then set #IE + NEG_INF_TOKEN: if imm8[5] then set #IE + NEG_VALUE_TOKEN: if imm8[6] then set #IE + POS_INF_TOKEN: if imm8[7] then set #IE + ESAC + RETURN dest[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF (j is even) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF (j is even) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 3 + i := j*64 + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 7 + i := j*64 + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 1 + i := j*64 + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 7 + i := j*32 + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 15 + i := j*32 + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + + +FOR j := 0 to 3 + i := j*32 + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512VL + AVX512DQ + Miscellaneous + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k". + [fpclass_note] + + +k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0]) +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + + Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + [fpclass_note] + + +IF k1[0] + k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0]) +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k". + [fpclass_note] + + +k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0]) +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + Mask + AVX512DQ + Miscellaneous + + + + Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + [fpclass_note] + + +IF k1[0] + k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0]) +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + + +dst[511:0] := a[511:0] +CASE (imm8[7:0]) OF +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[7:0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[7:0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[7:0] of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[7:0] of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[7:0] of +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[7:0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[7:0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[7:0] of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[255:0] := a[255:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[7:0] of +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512DQ + Miscellaneous + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) of +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + + Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + + Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + + Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + + Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Move + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + + Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + + Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Store + + + + Store packed 16-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Load + + + + Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Move + + + + Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Store + + + + Store packed 16-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Load + + + Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Move + + + Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + + Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + + Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Store + + + + Store packed 16-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 32-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Store packed 64-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + + Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + + Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Store + + + + Store packed 8-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Load + + + + Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Move + + + + Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Store + + + + Store packed 8-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512BW + Load + + + Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Move + + + Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + + Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + + Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Store + + + + Store packed 8-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Load + + + Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Move + + + Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Move + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Load + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). RM. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). RM. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] BITWISE OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := ABS(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + Compute the absolute value of packed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compute the absolute value of packed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ABS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ABS(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compute the absolute value of packed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ABS(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + Compute the absolute value of packed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) +tmp_dst[271:256] := Saturate_Int32_To_Int16 (a[287:256]) +tmp_dst[287:272] := Saturate_Int32_To_Int16 (a[319:288]) +tmp_dst[303:288] := Saturate_Int32_To_Int16 (a[351:320]) +tmp_dst[319:304] := Saturate_Int32_To_Int16 (a[383:352]) +tmp_dst[335:320] := Saturate_Int32_To_Int16 (b[287:256]) +tmp_dst[351:336] := Saturate_Int32_To_Int16 (b[319:288]) +tmp_dst[367:352] := Saturate_Int32_To_Int16 (b[351:320]) +tmp_dst[383:368] := Saturate_Int32_To_Int16 (b[383:352]) +tmp_dst[399:384] := Saturate_Int32_To_Int16 (a[415:384]) +tmp_dst[415:400] := Saturate_Int32_To_Int16 (a[447:416]) +tmp_dst[431:416] := Saturate_Int32_To_Int16 (a[479:448]) +tmp_dst[447:432] := Saturate_Int32_To_Int16 (a[511:480]) +tmp_dst[463:448] := Saturate_Int32_To_Int16 (b[415:384]) +tmp_dst[479:464] := Saturate_Int32_To_Int16 (b[447:416]) 
+tmp_dst[495:480] := Saturate_Int32_To_Int16 (b[479:448]) +tmp_dst[511:496] := Saturate_Int32_To_Int16 (b[511:480]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) +tmp_dst[271:256] := Saturate_Int32_To_Int16 (a[287:256]) +tmp_dst[287:272] := Saturate_Int32_To_Int16 (a[319:288]) +tmp_dst[303:288] := Saturate_Int32_To_Int16 (a[351:320]) +tmp_dst[319:304] := Saturate_Int32_To_Int16 (a[383:352]) +tmp_dst[335:320] := Saturate_Int32_To_Int16 (b[287:256]) +tmp_dst[351:336] := Saturate_Int32_To_Int16 (b[319:288]) +tmp_dst[367:352] := Saturate_Int32_To_Int16 (b[351:320]) +tmp_dst[383:368] := Saturate_Int32_To_Int16 (b[383:352]) +tmp_dst[399:384] := Saturate_Int32_To_Int16 (a[415:384]) +tmp_dst[415:400] := Saturate_Int32_To_Int16 (a[447:416]) +tmp_dst[431:416] := Saturate_Int32_To_Int16 (a[479:448]) +tmp_dst[447:432] := Saturate_Int32_To_Int16 (a[511:480]) +tmp_dst[463:448] := Saturate_Int32_To_Int16 (b[415:384]) +tmp_dst[479:464] := Saturate_Int32_To_Int16 (b[447:416]) 
+tmp_dst[495:480] := Saturate_Int32_To_Int16 (b[479:448]) +tmp_dst[511:496] := Saturate_Int32_To_Int16 (b[511:480]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) +dst[143:128] := Saturate_Int32_To_Int16 (a[159:128]) +dst[159:144] := Saturate_Int32_To_Int16 (a[191:160]) +dst[175:160] := Saturate_Int32_To_Int16 (a[223:192]) +dst[191:176] := Saturate_Int32_To_Int16 (a[255:224]) +dst[207:192] := Saturate_Int32_To_Int16 (b[159:128]) +dst[223:208] := Saturate_Int32_To_Int16 (b[191:160]) +dst[239:224] := Saturate_Int32_To_Int16 (b[223:192]) +dst[255:240] := Saturate_Int32_To_Int16 (b[255:224]) +dst[271:256] := Saturate_Int32_To_Int16 (a[287:256]) +dst[287:272] := Saturate_Int32_To_Int16 (a[319:288]) +dst[303:288] := Saturate_Int32_To_Int16 (a[351:320]) +dst[319:304] := Saturate_Int32_To_Int16 (a[383:352]) +dst[335:320] := Saturate_Int32_To_Int16 (b[287:256]) +dst[351:336] := Saturate_Int32_To_Int16 (b[319:288]) +dst[367:352] := Saturate_Int32_To_Int16 (b[351:320]) +dst[383:368] := Saturate_Int32_To_Int16 (b[383:352]) +dst[399:384] := Saturate_Int32_To_Int16 (a[415:384]) +dst[415:400] := Saturate_Int32_To_Int16 (a[447:416]) +dst[431:416] := Saturate_Int32_To_Int16 (a[479:448]) +dst[447:432] := Saturate_Int32_To_Int16 (a[511:480]) +dst[463:448] := Saturate_Int32_To_Int16 (b[415:384]) +dst[479:464] := Saturate_Int32_To_Int16 (b[447:416]) +dst[495:480] := Saturate_Int32_To_Int16 (b[479:448]) +dst[511:496] := Saturate_Int32_To_Int16 (b[511:480]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[15:0] := Saturate_Int32_To_Int16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_Int16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_Int16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_Int16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_Int16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_Int16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_Int16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_Int16 (b[127:96]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +tmp_dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_Int8 
(b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +tmp_dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_Int8 
(b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +tmp_dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_Int8 (b[239:224]) 
+tmp_dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) +tmp_dst[263:256] := Saturate_Int16_To_Int8 (a[271:256]) +tmp_dst[271:264] := Saturate_Int16_To_Int8 (a[287:272]) +tmp_dst[279:272] := Saturate_Int16_To_Int8 (a[303:288]) +tmp_dst[287:280] := Saturate_Int16_To_Int8 (a[319:304]) +tmp_dst[295:288] := Saturate_Int16_To_Int8 (a[335:320]) +tmp_dst[303:296] := Saturate_Int16_To_Int8 (a[351:336]) +tmp_dst[311:304] := Saturate_Int16_To_Int8 (a[367:352]) +tmp_dst[319:312] := Saturate_Int16_To_Int8 (a[383:368]) +tmp_dst[327:320] := Saturate_Int16_To_Int8 (b[271:256]) +tmp_dst[335:328] := Saturate_Int16_To_Int8 (b[287:272]) +tmp_dst[343:336] := Saturate_Int16_To_Int8 (b[303:288]) +tmp_dst[351:344] := Saturate_Int16_To_Int8 (b[319:304]) +tmp_dst[359:352] := Saturate_Int16_To_Int8 (b[335:320]) +tmp_dst[367:360] := Saturate_Int16_To_Int8 (b[351:336]) +tmp_dst[375:368] := Saturate_Int16_To_Int8 (b[367:352]) +tmp_dst[383:376] := Saturate_Int16_To_Int8 (b[383:368]) +tmp_dst[391:384] := Saturate_Int16_To_Int8 (a[399:384]) +tmp_dst[399:392] := Saturate_Int16_To_Int8 (a[415:400]) +tmp_dst[407:400] := Saturate_Int16_To_Int8 (a[431:416]) +tmp_dst[415:408] := Saturate_Int16_To_Int8 (a[447:432]) +tmp_dst[423:416] := Saturate_Int16_To_Int8 (a[463:448]) +tmp_dst[431:424] := Saturate_Int16_To_Int8 (a[479:464]) +tmp_dst[439:432] := Saturate_Int16_To_Int8 (a[495:480]) +tmp_dst[447:440] := Saturate_Int16_To_Int8 (a[511:496]) +tmp_dst[455:448] := Saturate_Int16_To_Int8 (b[399:384]) +tmp_dst[463:456] := Saturate_Int16_To_Int8 (b[415:400]) +tmp_dst[471:464] := Saturate_Int16_To_Int8 (b[431:416]) +tmp_dst[479:472] := Saturate_Int16_To_Int8 (b[447:432]) +tmp_dst[487:480] := Saturate_Int16_To_Int8 (b[463:448]) +tmp_dst[495:488] := Saturate_Int16_To_Int8 (b[479:464]) +tmp_dst[503:496] := Saturate_Int16_To_Int8 (b[495:480]) +tmp_dst[511:504] := Saturate_Int16_To_Int8 (b[511:496]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR 
+dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +tmp_dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_Int8 (b[239:224]) 
+tmp_dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) +tmp_dst[263:256] := Saturate_Int16_To_Int8 (a[271:256]) +tmp_dst[271:264] := Saturate_Int16_To_Int8 (a[287:272]) +tmp_dst[279:272] := Saturate_Int16_To_Int8 (a[303:288]) +tmp_dst[287:280] := Saturate_Int16_To_Int8 (a[319:304]) +tmp_dst[295:288] := Saturate_Int16_To_Int8 (a[335:320]) +tmp_dst[303:296] := Saturate_Int16_To_Int8 (a[351:336]) +tmp_dst[311:304] := Saturate_Int16_To_Int8 (a[367:352]) +tmp_dst[319:312] := Saturate_Int16_To_Int8 (a[383:368]) +tmp_dst[327:320] := Saturate_Int16_To_Int8 (b[271:256]) +tmp_dst[335:328] := Saturate_Int16_To_Int8 (b[287:272]) +tmp_dst[343:336] := Saturate_Int16_To_Int8 (b[303:288]) +tmp_dst[351:344] := Saturate_Int16_To_Int8 (b[319:304]) +tmp_dst[359:352] := Saturate_Int16_To_Int8 (b[335:320]) +tmp_dst[367:360] := Saturate_Int16_To_Int8 (b[351:336]) +tmp_dst[375:368] := Saturate_Int16_To_Int8 (b[367:352]) +tmp_dst[383:376] := Saturate_Int16_To_Int8 (b[383:368]) +tmp_dst[391:384] := Saturate_Int16_To_Int8 (a[399:384]) +tmp_dst[399:392] := Saturate_Int16_To_Int8 (a[415:400]) +tmp_dst[407:400] := Saturate_Int16_To_Int8 (a[431:416]) +tmp_dst[415:408] := Saturate_Int16_To_Int8 (a[447:432]) +tmp_dst[423:416] := Saturate_Int16_To_Int8 (a[463:448]) +tmp_dst[431:424] := Saturate_Int16_To_Int8 (a[479:464]) +tmp_dst[439:432] := Saturate_Int16_To_Int8 (a[495:480]) +tmp_dst[447:440] := Saturate_Int16_To_Int8 (a[511:496]) +tmp_dst[455:448] := Saturate_Int16_To_Int8 (b[399:384]) +tmp_dst[463:456] := Saturate_Int16_To_Int8 (b[415:400]) +tmp_dst[471:464] := Saturate_Int16_To_Int8 (b[431:416]) +tmp_dst[479:472] := Saturate_Int16_To_Int8 (b[447:432]) +tmp_dst[487:480] := Saturate_Int16_To_Int8 (b[463:448]) +tmp_dst[495:488] := Saturate_Int16_To_Int8 (b[479:464]) +tmp_dst[503:496] := Saturate_Int16_To_Int8 (b[495:480]) +tmp_dst[511:504] := Saturate_Int16_To_Int8 (b[511:496]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR 
+dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + + +dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) +dst[135:128] := Saturate_Int16_To_Int8 (a[143:128]) +dst[143:136] := Saturate_Int16_To_Int8 (a[159:144]) +dst[151:144] := Saturate_Int16_To_Int8 (a[175:160]) +dst[159:152] := Saturate_Int16_To_Int8 (a[191:176]) +dst[167:160] := Saturate_Int16_To_Int8 (a[207:192]) +dst[175:168] := Saturate_Int16_To_Int8 (a[223:208]) +dst[183:176] := Saturate_Int16_To_Int8 (a[239:224]) +dst[191:184] := Saturate_Int16_To_Int8 (a[255:240]) +dst[199:192] := Saturate_Int16_To_Int8 (b[143:128]) +dst[207:200] := Saturate_Int16_To_Int8 (b[159:144]) +dst[215:208] := Saturate_Int16_To_Int8 (b[175:160]) +dst[223:216] := Saturate_Int16_To_Int8 (b[191:176]) +dst[231:224] := Saturate_Int16_To_Int8 (b[207:192]) +dst[239:232] := Saturate_Int16_To_Int8 (b[223:208]) +dst[247:240] := Saturate_Int16_To_Int8 (b[239:224]) +dst[255:248] := Saturate_Int16_To_Int8 (b[255:240]) +dst[263:256] := Saturate_Int16_To_Int8 (a[271:256]) +dst[271:264] := Saturate_Int16_To_Int8 (a[287:272]) +dst[279:272] := Saturate_Int16_To_Int8 (a[303:288]) 
+dst[287:280] := Saturate_Int16_To_Int8 (a[319:304]) +dst[295:288] := Saturate_Int16_To_Int8 (a[335:320]) +dst[303:296] := Saturate_Int16_To_Int8 (a[351:336]) +dst[311:304] := Saturate_Int16_To_Int8 (a[367:352]) +dst[319:312] := Saturate_Int16_To_Int8 (a[383:368]) +dst[327:320] := Saturate_Int16_To_Int8 (b[271:256]) +dst[335:328] := Saturate_Int16_To_Int8 (b[287:272]) +dst[343:336] := Saturate_Int16_To_Int8 (b[303:288]) +dst[351:344] := Saturate_Int16_To_Int8 (b[319:304]) +dst[359:352] := Saturate_Int16_To_Int8 (b[335:320]) +dst[367:360] := Saturate_Int16_To_Int8 (b[351:336]) +dst[375:368] := Saturate_Int16_To_Int8 (b[367:352]) +dst[383:376] := Saturate_Int16_To_Int8 (b[383:368]) +dst[391:384] := Saturate_Int16_To_Int8 (a[399:384]) +dst[399:392] := Saturate_Int16_To_Int8 (a[415:400]) +dst[407:400] := Saturate_Int16_To_Int8 (a[431:416]) +dst[415:408] := Saturate_Int16_To_Int8 (a[447:432]) +dst[423:416] := Saturate_Int16_To_Int8 (a[463:448]) +dst[431:424] := Saturate_Int16_To_Int8 (a[479:464]) +dst[439:432] := Saturate_Int16_To_Int8 (a[495:480]) +dst[447:440] := Saturate_Int16_To_Int8 (a[511:496]) +dst[455:448] := Saturate_Int16_To_Int8 (b[399:384]) +dst[463:456] := Saturate_Int16_To_Int8 (b[415:400]) +dst[471:464] := Saturate_Int16_To_Int8 (b[431:416]) +dst[479:472] := Saturate_Int16_To_Int8 (b[447:432]) +dst[487:480] := Saturate_Int16_To_Int8 (b[463:448]) +dst[495:488] := Saturate_Int16_To_Int8 (b[479:464]) +dst[503:496] := Saturate_Int16_To_Int8 (b[495:480]) +dst[511:504] := Saturate_Int16_To_Int8 (b[511:496]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +tmp_dst[7:0] := Saturate_Int16_To_Int8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_Int8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_Int8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_Int8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_Int8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_Int8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_Int8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_Int8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_Int8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_Int8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_Int8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_Int8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_Int8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_Int8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_Int8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_Int8 (b[127:112]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) +tmp_dst[271:256] := Saturate_Int32_To_UnsignedInt16 (a[287:256]) +tmp_dst[287:272] := Saturate_Int32_To_UnsignedInt16 (a[319:288]) +tmp_dst[303:288] := Saturate_Int32_To_UnsignedInt16 (a[351:320]) +tmp_dst[319:304] := Saturate_Int32_To_UnsignedInt16 (a[383:352]) +tmp_dst[335:320] := Saturate_Int32_To_UnsignedInt16 (b[287:256]) +tmp_dst[351:336] := Saturate_Int32_To_UnsignedInt16 (b[319:288]) +tmp_dst[367:352] := Saturate_Int32_To_UnsignedInt16 (b[351:320]) +tmp_dst[383:368] := Saturate_Int32_To_UnsignedInt16 (b[383:352]) +tmp_dst[399:384] := Saturate_Int32_To_UnsignedInt16 (a[415:384]) +tmp_dst[415:400] := Saturate_Int32_To_UnsignedInt16 (a[447:416]) +tmp_dst[431:416] := 
Saturate_Int32_To_UnsignedInt16 (a[479:448]) +tmp_dst[447:432] := Saturate_Int32_To_UnsignedInt16 (a[511:480]) +tmp_dst[463:448] := Saturate_Int32_To_UnsignedInt16 (b[415:384]) +tmp_dst[479:464] := Saturate_Int32_To_UnsignedInt16 (b[447:416]) +tmp_dst[495:480] := Saturate_Int32_To_UnsignedInt16 (b[479:448]) +tmp_dst[511:496] := Saturate_Int32_To_UnsignedInt16 (b[511:480]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +tmp_dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +tmp_dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +tmp_dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +tmp_dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +tmp_dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +tmp_dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +tmp_dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +tmp_dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) +tmp_dst[271:256] := Saturate_Int32_To_UnsignedInt16 (a[287:256]) +tmp_dst[287:272] := Saturate_Int32_To_UnsignedInt16 (a[319:288]) +tmp_dst[303:288] := Saturate_Int32_To_UnsignedInt16 (a[351:320]) +tmp_dst[319:304] := Saturate_Int32_To_UnsignedInt16 (a[383:352]) +tmp_dst[335:320] := Saturate_Int32_To_UnsignedInt16 (b[287:256]) +tmp_dst[351:336] := Saturate_Int32_To_UnsignedInt16 (b[319:288]) +tmp_dst[367:352] := Saturate_Int32_To_UnsignedInt16 (b[351:320]) +tmp_dst[383:368] := Saturate_Int32_To_UnsignedInt16 (b[383:352]) +tmp_dst[399:384] := Saturate_Int32_To_UnsignedInt16 (a[415:384]) +tmp_dst[415:400] := Saturate_Int32_To_UnsignedInt16 (a[447:416]) +tmp_dst[431:416] := 
Saturate_Int32_To_UnsignedInt16 (a[479:448]) +tmp_dst[447:432] := Saturate_Int32_To_UnsignedInt16 (a[511:480]) +tmp_dst[463:448] := Saturate_Int32_To_UnsignedInt16 (b[415:384]) +tmp_dst[479:464] := Saturate_Int32_To_UnsignedInt16 (b[447:416]) +tmp_dst[495:480] := Saturate_Int32_To_UnsignedInt16 (b[479:448]) +tmp_dst[511:496] := Saturate_Int32_To_UnsignedInt16 (b[511:480]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst". + +dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) +dst[143:128] := Saturate_Int32_To_UnsignedInt16 (a[159:128]) +dst[159:144] := Saturate_Int32_To_UnsignedInt16 (a[191:160]) +dst[175:160] := Saturate_Int32_To_UnsignedInt16 (a[223:192]) +dst[191:176] := Saturate_Int32_To_UnsignedInt16 (a[255:224]) +dst[207:192] := Saturate_Int32_To_UnsignedInt16 (b[159:128]) +dst[223:208] := Saturate_Int32_To_UnsignedInt16 (b[191:160]) +dst[239:224] := Saturate_Int32_To_UnsignedInt16 (b[223:192]) +dst[255:240] := Saturate_Int32_To_UnsignedInt16 (b[255:224]) +dst[271:256] := Saturate_Int32_To_UnsignedInt16 (a[287:256]) +dst[287:272] := Saturate_Int32_To_UnsignedInt16 (a[319:288]) +dst[303:288] := Saturate_Int32_To_UnsignedInt16 (a[351:320]) +dst[319:304] := Saturate_Int32_To_UnsignedInt16 (a[383:352]) +dst[335:320] := Saturate_Int32_To_UnsignedInt16 (b[287:256]) +dst[351:336] := Saturate_Int32_To_UnsignedInt16 (b[319:288]) +dst[367:352] := Saturate_Int32_To_UnsignedInt16 (b[351:320]) +dst[383:368] := Saturate_Int32_To_UnsignedInt16 (b[383:352]) +dst[399:384] := Saturate_Int32_To_UnsignedInt16 (a[415:384]) +dst[415:400] := Saturate_Int32_To_UnsignedInt16 (a[447:416]) +dst[431:416] := Saturate_Int32_To_UnsignedInt16 (a[479:448]) +dst[447:432] := Saturate_Int32_To_UnsignedInt16 (a[511:480]) +dst[463:448] := Saturate_Int32_To_UnsignedInt16 (b[415:384]) +dst[479:464] := 
Saturate_Int32_To_UnsignedInt16 (b[447:416]) +dst[495:480] := Saturate_Int32_To_UnsignedInt16 (b[479:448]) +dst[511:496] := Saturate_Int32_To_UnsignedInt16 (b[511:480]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := Saturate_Int32_To_UnsignedInt16 (a[31:0]) +tmp_dst[31:16] := Saturate_Int32_To_UnsignedInt16 (a[63:32]) +tmp_dst[47:32] := Saturate_Int32_To_UnsignedInt16 (a[95:64]) +tmp_dst[63:48] := Saturate_Int32_To_UnsignedInt16 (a[127:96]) +tmp_dst[79:64] := Saturate_Int32_To_UnsignedInt16 (b[31:0]) +tmp_dst[95:80] := Saturate_Int32_To_UnsignedInt16 (b[63:32]) +tmp_dst[111:96] := Saturate_Int32_To_UnsignedInt16 (b[95:64]) +tmp_dst[127:112] := Saturate_Int32_To_UnsignedInt16 (b[127:96]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) 
+tmp_dst[223:216] := Saturate_Int16_To_UnsignedInt8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_UnsignedInt8 (b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) 
+tmp_dst[223:216] := Saturate_Int16_To_UnsignedInt8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_UnsignedInt8 (b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) 
+tmp_dst[223:216] := Saturate_Int16_To_UnsignedInt8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_UnsignedInt8 (b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) +tmp_dst[263:256] := Saturate_Int16_To_UnsignedInt8 (a[271:256]) +tmp_dst[271:264] := Saturate_Int16_To_UnsignedInt8 (a[287:272]) +tmp_dst[279:272] := Saturate_Int16_To_UnsignedInt8 (a[303:288]) +tmp_dst[287:280] := Saturate_Int16_To_UnsignedInt8 (a[319:304]) +tmp_dst[295:288] := Saturate_Int16_To_UnsignedInt8 (a[335:320]) +tmp_dst[303:296] := Saturate_Int16_To_UnsignedInt8 (a[351:336]) +tmp_dst[311:304] := Saturate_Int16_To_UnsignedInt8 (a[367:352]) +tmp_dst[319:312] := Saturate_Int16_To_UnsignedInt8 (a[383:368]) +tmp_dst[327:320] := Saturate_Int16_To_UnsignedInt8 (b[271:256]) +tmp_dst[335:328] := Saturate_Int16_To_UnsignedInt8 (b[287:272]) +tmp_dst[343:336] := Saturate_Int16_To_UnsignedInt8 (b[303:288]) +tmp_dst[351:344] := Saturate_Int16_To_UnsignedInt8 (b[319:304]) +tmp_dst[359:352] := Saturate_Int16_To_UnsignedInt8 (b[335:320]) +tmp_dst[367:360] := Saturate_Int16_To_UnsignedInt8 (b[351:336]) +tmp_dst[375:368] := Saturate_Int16_To_UnsignedInt8 (b[367:352]) +tmp_dst[383:376] := Saturate_Int16_To_UnsignedInt8 (b[383:368]) +tmp_dst[391:384] := Saturate_Int16_To_UnsignedInt8 (a[399:384]) +tmp_dst[399:392] := Saturate_Int16_To_UnsignedInt8 (a[415:400]) +tmp_dst[407:400] := Saturate_Int16_To_UnsignedInt8 (a[431:416]) +tmp_dst[415:408] := Saturate_Int16_To_UnsignedInt8 (a[447:432]) +tmp_dst[423:416] := Saturate_Int16_To_UnsignedInt8 (a[463:448]) +tmp_dst[431:424] := Saturate_Int16_To_UnsignedInt8 (a[479:464]) +tmp_dst[439:432] := Saturate_Int16_To_UnsignedInt8 (a[495:480]) +tmp_dst[447:440] := Saturate_Int16_To_UnsignedInt8 (a[511:496]) +tmp_dst[455:448] := Saturate_Int16_To_UnsignedInt8 (b[399:384]) +tmp_dst[463:456] := 
Saturate_Int16_To_UnsignedInt8 (b[415:400]) +tmp_dst[471:464] := Saturate_Int16_To_UnsignedInt8 (b[431:416]) +tmp_dst[479:472] := Saturate_Int16_To_UnsignedInt8 (b[447:432]) +tmp_dst[487:480] := Saturate_Int16_To_UnsignedInt8 (b[463:448]) +tmp_dst[495:488] := Saturate_Int16_To_UnsignedInt8 (b[479:464]) +tmp_dst[503:496] := Saturate_Int16_To_UnsignedInt8 (b[495:480]) +tmp_dst[511:504] := Saturate_Int16_To_UnsignedInt8 (b[511:496]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +tmp_dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +tmp_dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +tmp_dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +tmp_dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +tmp_dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +tmp_dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +tmp_dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +tmp_dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +tmp_dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +tmp_dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +tmp_dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) +tmp_dst[223:216] := 
Saturate_Int16_To_UnsignedInt8 (b[191:176]) +tmp_dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +tmp_dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +tmp_dst[247:240] := Saturate_Int16_To_UnsignedInt8 (b[239:224]) +tmp_dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) +tmp_dst[263:256] := Saturate_Int16_To_UnsignedInt8 (a[271:256]) +tmp_dst[271:264] := Saturate_Int16_To_UnsignedInt8 (a[287:272]) +tmp_dst[279:272] := Saturate_Int16_To_UnsignedInt8 (a[303:288]) +tmp_dst[287:280] := Saturate_Int16_To_UnsignedInt8 (a[319:304]) +tmp_dst[295:288] := Saturate_Int16_To_UnsignedInt8 (a[335:320]) +tmp_dst[303:296] := Saturate_Int16_To_UnsignedInt8 (a[351:336]) +tmp_dst[311:304] := Saturate_Int16_To_UnsignedInt8 (a[367:352]) +tmp_dst[319:312] := Saturate_Int16_To_UnsignedInt8 (a[383:368]) +tmp_dst[327:320] := Saturate_Int16_To_UnsignedInt8 (b[271:256]) +tmp_dst[335:328] := Saturate_Int16_To_UnsignedInt8 (b[287:272]) +tmp_dst[343:336] := Saturate_Int16_To_UnsignedInt8 (b[303:288]) +tmp_dst[351:344] := Saturate_Int16_To_UnsignedInt8 (b[319:304]) +tmp_dst[359:352] := Saturate_Int16_To_UnsignedInt8 (b[335:320]) +tmp_dst[367:360] := Saturate_Int16_To_UnsignedInt8 (b[351:336]) +tmp_dst[375:368] := Saturate_Int16_To_UnsignedInt8 (b[367:352]) +tmp_dst[383:376] := Saturate_Int16_To_UnsignedInt8 (b[383:368]) +tmp_dst[391:384] := Saturate_Int16_To_UnsignedInt8 (a[399:384]) +tmp_dst[399:392] := Saturate_Int16_To_UnsignedInt8 (a[415:400]) +tmp_dst[407:400] := Saturate_Int16_To_UnsignedInt8 (a[431:416]) +tmp_dst[415:408] := Saturate_Int16_To_UnsignedInt8 (a[447:432]) +tmp_dst[423:416] := Saturate_Int16_To_UnsignedInt8 (a[463:448]) +tmp_dst[431:424] := Saturate_Int16_To_UnsignedInt8 (a[479:464]) +tmp_dst[439:432] := Saturate_Int16_To_UnsignedInt8 (a[495:480]) +tmp_dst[447:440] := Saturate_Int16_To_UnsignedInt8 (a[511:496]) +tmp_dst[455:448] := Saturate_Int16_To_UnsignedInt8 (b[399:384]) +tmp_dst[463:456] := Saturate_Int16_To_UnsignedInt8 (b[415:400]) 
+tmp_dst[471:464] := Saturate_Int16_To_UnsignedInt8 (b[431:416]) +tmp_dst[479:472] := Saturate_Int16_To_UnsignedInt8 (b[447:432]) +tmp_dst[487:480] := Saturate_Int16_To_UnsignedInt8 (b[463:448]) +tmp_dst[495:488] := Saturate_Int16_To_UnsignedInt8 (b[479:464]) +tmp_dst[503:496] := Saturate_Int16_To_UnsignedInt8 (b[495:480]) +tmp_dst[511:504] := Saturate_Int16_To_UnsignedInt8 (b[511:496]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Miscellaneous + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) +dst[135:128] := Saturate_Int16_To_UnsignedInt8 (a[143:128]) +dst[143:136] := Saturate_Int16_To_UnsignedInt8 (a[159:144]) +dst[151:144] := Saturate_Int16_To_UnsignedInt8 (a[175:160]) +dst[159:152] := Saturate_Int16_To_UnsignedInt8 (a[191:176]) +dst[167:160] := Saturate_Int16_To_UnsignedInt8 (a[207:192]) +dst[175:168] := Saturate_Int16_To_UnsignedInt8 (a[223:208]) +dst[183:176] := Saturate_Int16_To_UnsignedInt8 (a[239:224]) +dst[191:184] := Saturate_Int16_To_UnsignedInt8 (a[255:240]) +dst[199:192] := Saturate_Int16_To_UnsignedInt8 (b[143:128]) +dst[207:200] := Saturate_Int16_To_UnsignedInt8 (b[159:144]) +dst[215:208] := Saturate_Int16_To_UnsignedInt8 (b[175:160]) +dst[223:216] := Saturate_Int16_To_UnsignedInt8 (b[191:176]) +dst[231:224] := Saturate_Int16_To_UnsignedInt8 (b[207:192]) +dst[239:232] := Saturate_Int16_To_UnsignedInt8 (b[223:208]) +dst[247:240] := 
Saturate_Int16_To_UnsignedInt8 (b[239:224]) +dst[255:248] := Saturate_Int16_To_UnsignedInt8 (b[255:240]) +dst[263:256] := Saturate_Int16_To_UnsignedInt8 (a[271:256]) +dst[271:264] := Saturate_Int16_To_UnsignedInt8 (a[287:272]) +dst[279:272] := Saturate_Int16_To_UnsignedInt8 (a[303:288]) +dst[287:280] := Saturate_Int16_To_UnsignedInt8 (a[319:304]) +dst[295:288] := Saturate_Int16_To_UnsignedInt8 (a[335:320]) +dst[303:296] := Saturate_Int16_To_UnsignedInt8 (a[351:336]) +dst[311:304] := Saturate_Int16_To_UnsignedInt8 (a[367:352]) +dst[319:312] := Saturate_Int16_To_UnsignedInt8 (a[383:368]) +dst[327:320] := Saturate_Int16_To_UnsignedInt8 (b[271:256]) +dst[335:328] := Saturate_Int16_To_UnsignedInt8 (b[287:272]) +dst[343:336] := Saturate_Int16_To_UnsignedInt8 (b[303:288]) +dst[351:344] := Saturate_Int16_To_UnsignedInt8 (b[319:304]) +dst[359:352] := Saturate_Int16_To_UnsignedInt8 (b[335:320]) +dst[367:360] := Saturate_Int16_To_UnsignedInt8 (b[351:336]) +dst[375:368] := Saturate_Int16_To_UnsignedInt8 (b[367:352]) +dst[383:376] := Saturate_Int16_To_UnsignedInt8 (b[383:368]) +dst[391:384] := Saturate_Int16_To_UnsignedInt8 (a[399:384]) +dst[399:392] := Saturate_Int16_To_UnsignedInt8 (a[415:400]) +dst[407:400] := Saturate_Int16_To_UnsignedInt8 (a[431:416]) +dst[415:408] := Saturate_Int16_To_UnsignedInt8 (a[447:432]) +dst[423:416] := Saturate_Int16_To_UnsignedInt8 (a[463:448]) +dst[431:424] := Saturate_Int16_To_UnsignedInt8 (a[479:464]) +dst[439:432] := Saturate_Int16_To_UnsignedInt8 (a[495:480]) +dst[447:440] := Saturate_Int16_To_UnsignedInt8 (a[511:496]) +dst[455:448] := Saturate_Int16_To_UnsignedInt8 (b[399:384]) +dst[463:456] := Saturate_Int16_To_UnsignedInt8 (b[415:400]) +dst[471:464] := Saturate_Int16_To_UnsignedInt8 (b[431:416]) +dst[479:472] := Saturate_Int16_To_UnsignedInt8 (b[447:432]) +dst[487:480] := Saturate_Int16_To_UnsignedInt8 (b[463:448]) +dst[495:488] := Saturate_Int16_To_UnsignedInt8 (b[479:464]) +dst[503:496] := Saturate_Int16_To_UnsignedInt8 (b[495:480]) 
+dst[511:504] := Saturate_Int16_To_UnsignedInt8 (b[511:496]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Miscellaneous + + + + Convert packed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[7:0] := Saturate_Int16_To_UnsignedInt8 (a[15:0]) +tmp_dst[15:8] := Saturate_Int16_To_UnsignedInt8 (a[31:16]) +tmp_dst[23:16] := Saturate_Int16_To_UnsignedInt8 (a[47:32]) +tmp_dst[31:24] := Saturate_Int16_To_UnsignedInt8 (a[63:48]) +tmp_dst[39:32] := Saturate_Int16_To_UnsignedInt8 (a[79:64]) +tmp_dst[47:40] := Saturate_Int16_To_UnsignedInt8 (a[95:80]) +tmp_dst[55:48] := Saturate_Int16_To_UnsignedInt8 (a[111:96]) +tmp_dst[63:56] := Saturate_Int16_To_UnsignedInt8 (a[127:112]) +tmp_dst[71:64] := Saturate_Int16_To_UnsignedInt8 (b[15:0]) +tmp_dst[79:72] := Saturate_Int16_To_UnsignedInt8 (b[31:16]) +tmp_dst[87:80] := Saturate_Int16_To_UnsignedInt8 (b[47:32]) +tmp_dst[95:88] := Saturate_Int16_To_UnsignedInt8 (b[63:48]) +tmp_dst[103:96] := Saturate_Int16_To_UnsignedInt8 (b[79:64]) +tmp_dst[111:104] := Saturate_Int16_To_UnsignedInt8 (b[95:80]) +tmp_dst[119:112] := Saturate_Int16_To_UnsignedInt8 (b[111:96]) +tmp_dst[127:120] := Saturate_Int16_To_UnsignedInt8 (b[127:112]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst". + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + dst[i+127:i] := tmp[127:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128) OR b[i+127:i]) >> (count[7:0]*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[255:0] := ((a[127:0] << 128) OR b[127:0]) >> (count[7:0]*8) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "count" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[255:0] := ((a[127:0] << 128) OR b[127:0]) >> (count[7:0]*8) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE AND b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] BITWISE AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + + Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Set + + + + Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Set + + + Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + + Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + + Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + + Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Miscellaneous + + Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ZeroExtend(k[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Miscellaneous + + Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ZeroExtend(k[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Miscellaneous + + Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ZeroExtend(k[15:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Miscellaneous + + Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ZeroExtend(k[15:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + + Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + + Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Set + + + Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + Broadcast 16-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Set + + + + Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Set + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Set + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + 
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + 
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + 
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + <parameter varname='b' type='__m128i'/> + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + 
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + + Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + 
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + + Compare packed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE (imm8[7:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compare packed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k1" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 32 +m := base_addr +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 64 +m := base_addr +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*32 + FOR k := 0 to j-1 + m := k*32 + dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*32 + FOR k := 0 to j-1 + m := k*32 + dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*32 + IF k[i] + FOR l := 0 to j-1 + m := l*32 + dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*64 + FOR k := 0 to j-1 + m := k*64 + dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 1 + i := j*64 + FOR k := 0 to j-1 + m := k*64 + dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Compare + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR l := 0 to j-1 + m := l*64 + dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+3:i]*32 + IF k[j] + dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*32 + off := idx[i+2:i]*32 + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set) + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + off := idx[i+2:i]*32 + dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := idx[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + IF k[j] + dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + off := idx[i+1:i]*32 + dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off] +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + off := idx[i+1:i]*64 + dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := idx[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + IF k[j] + dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + off := idx[i]*64 + dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off] +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128] +IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192] +IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128] +IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +IF (b[129] == 0) tmp_dst[191:128] := a[191:128] +IF (b[129] == 1) tmp_dst[191:128] := a[255:192] +IF (b[193] == 0) tmp_dst[255:192] := a[191:128] +IF (b[193] == 1) tmp_dst[255:192] := a[255:192] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128] +IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192] +IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128] +IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +IF (b[129] == 0) tmp_dst[191:128] := a[191:128] +IF (b[129] == 1) tmp_dst[191:128] := a[255:192] +IF (b[193] == 0) tmp_dst[255:192] := a[191:128] +IF (b[193] == 1) tmp_dst[255:192] := a[255:192] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0] +IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64] +IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0] +IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +IF (b[1] == 0) tmp_dst[63:0] := a[63:0] +IF (b[1] == 1) tmp_dst[63:0] := a[127:64] +IF (b[65] == 0) tmp_dst[127:64] := a[63:0] +IF (b[65] == 1) tmp_dst[127:64] := a[127:64] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) +tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) +tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) +tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) +tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) +tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) +tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + dst[i+63:i] := a[id+63:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + IF k[j] + dst[i+31:i] := a[id+31:id] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx". + +FOR j := 0 to 7 + i := j*32 + id := idx[i+2:i]*32 + dst[i+31:i] := a[id+31:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + IF k[j] + dst[i+63:i] := a[id+63:id] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[63:0] := src[63:0] + 1: tmp[63:0] := src[127:64] + 2: tmp[63:0] := src[191:128] + 3: tmp[63:0] := src[255:192] + ESAC + RETURN tmp[63:0] +} + +dst[63:0] := SELECT4(a[255:0], imm8[1:0]) +dst[127:64] := SELECT4(a[255:0], imm8[3:2]) +dst[191:128] := SELECT4(a[255:0], imm8[5:4]) +dst[255:192] := SELECT4(a[255:0], imm8[7:6]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + id := idx[i+1:i]*64 + dst[i+63:i] := a[id+63:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:8] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[i+31:i])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[m+31:m])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[i+31:i] := MEM[base_addr + SignExtend(vindex[m+63:m])*scale] + k[j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:4] := 0 +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Load + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[base_addr + SignExtend(vindex[i+63:i])*scale] + k[j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +k[MAX:2] := 0 +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512CD + Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i+16]*b[i+31:i+16] + a[i+15:i]*b[i+15:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 63 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 31 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 63 + i := j*8 + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] > b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] > b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63:i] > b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst". + + +FOR j := 0 to 31 + i := j*16 + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] > b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 63 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 31 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 63 + i := j*8 + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF a[i+7:i] < b[i+7:i] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := b[i+7:i] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF a[i+31:i] < b[i+31:i] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := b[i+31:i] + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63:i] < b[i+63:i] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := b[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst". + + +FOR j := 0 to 31 + i := j*16 + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF a[i+15:i] < b[i+15:i] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := b[i+15:i] + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 31 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 63 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a". + +FOR j := 0 to 15 + i := j*32 + IF a[i+31] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[k+7:k] := Truncate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[k+7:k] := Truncate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[k+15:k] := Truncate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[k+15:k] := Truncate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + AVX512VL + Miscellaneous + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := 0xFFFFFFFF + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 0xFFFFFFFF + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := 0xFFFFFFFF + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := 0xFFFFFFFFffffffff + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Miscellaneous + + Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 0xFFFFFFFFffffffff + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Miscellaneous + + Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := 0xFFFFFFFFffffffff + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a". + +FOR j := 0 to 7 + i := j*64 + IF a[i+63] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512DQ + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[k+7:k] := Truncate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[k+7:k] := Truncate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[k+31:k] := Truncate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Truncate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := Truncate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Truncate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := Truncate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := Truncate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_Int32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_Int32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_Int64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_Int64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_Int64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_Int64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_Int64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_Int64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_Int64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + l := j*16 + dst[l+15:l] := SignExtend(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*16 + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + l := j*16 + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt32_To_Int8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt32_To_Int16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt64_To_Int8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate_UnsignedInt64_To_Int32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + Store + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate_UnsignedInt64_To_Int16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Store + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate_UnsignedInt16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 15 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 31 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Miscellaneous + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + Store + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate_Int16_To_Int8(a[i+15:i]) + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate_Int16_To_Int8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + l := j*16 + dst[l+15:l] := ZeroExtend(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + + Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Convert + + + Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the low 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := 9 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 31 + i := j*16 + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply packed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] * b[i+15:i]) >> 14) + 1 + dst[i+15:i] := tmp[16:1] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 31 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 31 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512F + Arithmetic + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Arithmetic + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Arithmetic + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512DQ + Arithmetic + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512DQ + Arithmetic + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 31 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +LEFT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_DWORDS(src, count_src){ + count := count_src modulo 32 + RETURN (src >> count) OR (src << (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +RIGHT_ROTATE_QWORDS(src, count_src){ + count := count_src modulo 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce eight unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst". + +FOR j := 0 to 63 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR +FOR j := 0 to 7 + i := j*64 + dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56] + dst[i+63:i+16] := 0 +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Store + + + + + + Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[4:0] := b[i+3:i] + (j & 0x10) + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[4:0] := b[i+3:i] + (j & 0x10) + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 8-bit integers in "a" within 128-bit lanes using the control in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[5:0] := b[i+3:i] + (j & 0x30) + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[5:0] := b[i+3:i] + (j & 0x30) + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[5:0] := b[i+3:i] + (j & 0x30) + dst[i+7:i] := a[index*8+7:index*8] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[3:0] := b[i+3:i] + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[3:0] := b[i+3:i] + dst[i+7:i] := a[index*8+7:index*8] + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +tmp_dst[319:256] := a[319:256] +tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +tmp_dst[447:384] := a[447:384] +tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +tmp_dst[319:256] := a[319:256] +tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +tmp_dst[447:384] := a[447:384] +tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst". + +dst[63:0] := a[63:0] +dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +dst[191:128] := a[191:128] +dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +dst[319:256] := a[319:256] +dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +dst[447:384] := a[447:384] +dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] +tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +tmp_dst[255:192] := a[255:192] + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] +tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +tmp_dst[255:192] := a[255:192] + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] +tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +tmp_dst[255:192] := a[255:192] +tmp_dst[271:256] := (a >> (imm8[1:0] * 16))[271:256] +tmp_dst[287:272] := (a >> (imm8[3:2] * 16))[271:256] +tmp_dst[303:288] := (a >> (imm8[5:4] * 16))[271:256] +tmp_dst[319:304] := (a >> (imm8[7:6] * 16))[271:256] +tmp_dst[383:320] := a[383:320] +tmp_dst[399:384] := (a >> (imm8[1:0] * 16))[399:384] +tmp_dst[415:400] := (a >> (imm8[3:2] * 16))[399:384] +tmp_dst[431:416] := (a >> (imm8[5:4] * 16))[399:384] +tmp_dst[447:432] := (a >> (imm8[7:6] * 16))[399:384] +tmp_dst[511:448] := a[511:448] + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] +tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +tmp_dst[255:192] := a[255:192] +tmp_dst[271:256] := (a >> (imm8[1:0] * 16))[271:256] +tmp_dst[287:272] := (a >> (imm8[3:2] * 16))[271:256] +tmp_dst[303:288] := (a >> (imm8[5:4] * 16))[271:256] +tmp_dst[319:304] := (a >> (imm8[7:6] * 16))[271:256] +tmp_dst[383:320] := a[383:320] +tmp_dst[399:384] := (a >> (imm8[1:0] * 16))[399:384] +tmp_dst[415:400] := (a >> (imm8[3:2] * 16))[399:384] +tmp_dst[431:416] := (a >> (imm8[5:4] * 16))[399:384] +tmp_dst[447:432] := (a >> (imm8[7:6] * 16))[399:384] +tmp_dst[511:448] := a[511:448] + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from from "a" to "dst". + +dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +dst[127:64] := a[127:64] +dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] +dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] +dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] +dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] +dst[255:192] := a[255:192] +dst[271:256] := (a >> (imm8[1:0] * 16))[271:256] +dst[287:272] := (a >> (imm8[3:2] * 16))[271:256] +dst[303:288] := (a >> (imm8[5:4] * 16))[271:256] +dst[319:304] := (a >> (imm8[7:6] * 16))[271:256] +dst[383:320] := a[383:320] +dst[399:384] := (a >> (imm8[1:0] * 16))[399:384] +dst[415:400] := (a >> (imm8[3:2] * 16))[399:384] +dst[431:416] := (a >> (imm8[5:4] * 16))[399:384] +dst[447:432] := (a >> (imm8[7:6] * 16))[399:384] +dst[511:448] := a[511:448] +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +tmp_dst[127:64] := a[127:64] + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] << imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) +dst[255:128] := a[255:128] << (tmp*8) +dst[383:256] := a[383:256] << (tmp*8) +dst[511:384] := a[511:384] << (tmp*8) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] << imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := SignBit + ELSE + dst[i+31:i] := SignExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := SignBit + ELSE + dst[i+63:i] := SignExtend(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SignExtend(a[i+63:i] >> count[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SignExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := SignBit + ELSE + dst[i+15:i] := SignExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[63:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend(a[i+31:i] >> imm8[7:0]) + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) +dst[255:128] := a[255:128] >> (tmp*8) +dst[383:256] := a[383:256] >> (tmp*8) +dst[511:384] := a[511:384] >> (tmp*8) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[63:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend(a[i+63:i] >> imm8[7:0]) + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ZeroExtend(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ZeroExtend(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Shift + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> count[63:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend(a[i+15:i] >> imm8[7:0]) + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] - b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_Int8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_Int16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate_To_UnsignedInt8(a[i+7:i] - b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate_To_UnsignedInt16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Arithmetic + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] - b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 7 + i := j*32 + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 3 + i := j*32 + FOR h := 0 to 31 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 3 + i := j*64 + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (src[i+h] << 2) OR (a[i+h] << 1) OR b[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst". + +FOR j := 0 to 1 + i := j*64 + FOR h := 0 to 63 + index[2:0] := (a[i+h] << 2) OR (b[i+h] << 1) OR c[i+h] + dst[i+h] := imm8[index[2:0]] + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 63 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 3 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 3 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 1 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 63 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 3 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 3 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512F + Compare + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 1 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512BW + Compare + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + Mask + AVX512VL + AVX512BW + Compare + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384]) + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_BYTES(src1[127:0], src2[127:0]){ + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384]) + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512BW + Miscellaneous + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512BW + Miscellaneous + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_WORDS(src1[127:0], src2[127:0]){ + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Integer + AVX512VL + AVX512F + Logical + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[63:0]] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[63:0]] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[63:0]] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[63:0]] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} + +dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[31:0]] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[31:0]] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[31:0]] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +IF k[0] + dst[31:0]] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + [round_note] + + +RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) +{ + CASE opCtl[1:0] + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} + +dst[31:0]] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := APPROXIMATE(1.0/a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := APPROXIMATE(1.0/a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(src[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Miscellaneous + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(src[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +IF k[0] + dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +IF k[0] + dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +IF k[0] + dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +IF k[0] + dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". + [round_note] + +ReduceArgumentPD(src1[63:0], imm8[7:0]) +{ + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[63:0] := pow(2, -m) * ROUND(pow(2, m) * src1[63:0], spe, rc_source, rc) + tmp[63:0] := src1[63:0] - tmp[63:0] + RETURN tmp[63:0] +} + +dst[63:0] := ReduceArgumentPD(a[63:0], imm8[7:0]) +dst[127:64] := b[127:64] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +IF k[0] + dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +IF k[0] + dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +IF k[0] + dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +IF k[0] + dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Miscellaneous + + + + + Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "a" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". + [round_note] + +ReduceArgumentPS(src1[31:0], imm8[7:0]) +{ + IF src1[31:0] == NAN + RETURN (convert src1[31:0] to QNaN) + FI + + m := imm8[7:4] // number of fraction bits after the binary point to be preserved + rc := imm8[1:0] // round control + rc_src := imm8[2] // round ccontrol source + spe := 0 + tmp[31:0] := pow(2, -m)*ROUND(pow(2, m)*src1[31:0], spe, rc_source, rc) + tmp[31:0] := src1[31:0] - tmp[31:0] + RETURN tmp[31:0] +} + +dst[31:0] := ReduceArgumentPS(a[31:0], imm8[7:0]) +dst[127:64] := b[127:32] +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPD(src[63:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[63:0] := round_to_nearest_even_integer(2^M * src[63:0]) + 1: tmp[63:0] := round_to_equal_or_smaller_integer(2^M * src[63:0]) + 2: tmp[63:0] := round_to_equal_or_larger_integer(2^M * src[63:0]) + 3: tmp[63:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[63:0]) + ESAC + + dst[63:0] := 2^-M * tmp[63:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[63:0] != dst[63:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RoundTo_IntegerPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". + + +RoundTo_IntegerPS(src[31:0], imm8[7:0]){ + IF(imm8[2] == 1) + rounding_direction := MXCSR.RC //Use the rounding mode specified by MXCSR.RC + ELSE + rounding_direction := imm8[1:0] //Use the rounding mode specified by imm8[1:0] + FI + + M := imm8[7:4] // The scaling factor (number of fraction bits to round to) + + CASE(rounding_direction) + 0: tmp[31:0] := round_to_nearest_even_integer(2^M * src[31:0]) + 1: tmp[31:0] := round_to_equal_or_smaller_integer(2^M * src[31:0]) + 2: tmp[31:0] := round_to_equal_or_larger_integer(2^M * src[31:0]) + 3: tmp[31:0] := round_to_nearest_smallest_magnitude_integer(2^M * src[31:0]) + ESAC + + dst[31:0] := 2^-M * tmp[31:0] // scale back down + + IF imm8[3] == 0 //check SPE + IF src[31:0] != dst[31:0] //check if precision has been lost + set_precision() //set #PE + FI + FI + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RoundTo_IntegerPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := APPROXIMATE(1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := APPROXIMATE(1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + +SCALE(src1, src2){ + IF (src2 == NaN) + IF (src2 == SNaN) + RETURN QNAN(src2) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) + RETURN QNAN(src1) + FI + IF (src2 != INF) + RETURN QNAN(src1) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (src2 is denormal AND MXCSR.DAZ) + tmp_src2 := 0 + FI + IF (src1 is denormal AND MXCSR.DAZ) + tmp_src1 := 0 + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+31:l])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+31:i])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[i+63:i])*scale] := a[i+63:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 3 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + l := j*64 + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] +ENDFOR + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Store + + + + + + Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + + +FOR j := 0 to 1 + i := j*32 + l := j*64 + IF k[j] + MEM[base_addr + SignExtend(vindex[l+63:l])*scale] := a[i+31:i] + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[1]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[1]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT2(a[255:0], imm8[0]) +dst[255:128] := SELECT2(b[255:0], imm8[1]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[1]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[1]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT4(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT2(a[255:0], imm8[0]) +dst[255:128] := SELECT2(b[255:0], imm8[1]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[3:2]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[3:2]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +dst[255:128] := SELECT2(b[255:0], imm8[3:2]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[3:2]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +tmp_dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +tmp_dst[255:128] := SELECT2(b[255:0], imm8[3:2]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VL + AVX512F + Miscellaneous + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +SELECT2(src, control){ + CASE(control[0]) + 0: tmp[127:0] := src[127:0] + 1: tmp[127:0] := src[255:128] + ESAC + RETURN tmp[127:0] +} + +dst[127:0] := SELECT2(a[255:0], imm8[1:0]) +dst[255:128] := SELECT2(b[255:0], imm8[3:2]) +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +SELECT4(src, control){ + CASE(control[1:0]) + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} + +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_QWORDS(src1[127:0], src2[127:0]){ + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512F + Miscellaneous + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +INTERLEAVE_DWORDS(src1[127:0], src2[127:0]){ + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} + +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512DQ + Logical + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + Floating Point + AVX512VL + AVX512DQ + Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512IFMA52 + AVX512VL + Arithmetic + + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i]) + dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Bit Manipulation + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Bit Manipulation + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Bit Manipulation + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := 0 + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := 0 + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Bit Manipulation + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 + tmp8[l] := b[q+((ctrl+l) & 63)] + ENDFOR + IF k[i*8+j] + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ELSE + dst[q+j*8+7:q+j*8] := 0 + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + + + + + + AVX512VBMI + Swizzle + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + +
immintrin.h
+
+ + AVX512VBMI + AVX512VL + Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + +
immintrin.h
+
+