// Default constructor. Leaves the vector uninitialized in release builds;
// when NAN-checking is enabled, every lane is filled with the 0xCDCDCDCD
// debug pattern so use of uninitialized data is easy to spot.
PL_ALWAYS_INLINE plSimdVec4i::plSimdVec4i()
{
  PL_CHECK_SIMD_ALIGNMENT(this);

#if PL_ENABLED(PL_MATH_CHECK_FOR_NAN)
  // vmovq_n_u32 yields a uint32x4_t; reinterpret to the signed register type
  // of m_v instead of relying on a non-conforming implicit conversion.
  m_v = vreinterpretq_s32_u32(vmovq_n_u32(0xCDCDCDCD));
#endif
}
// Broadcast constructor: replicates xyzw into all four lanes.
PL_ALWAYS_INLINE plSimdVec4i::plSimdVec4i(plInt32 xyzw)
{
  PL_CHECK_SIMD_ALIGNMENT(this);

  m_v = vmovq_n_s32(xyzw);
}
// Component-wise constructor: lanes are (x, y, z, w).
PL_ALWAYS_INLINE plSimdVec4i::plSimdVec4i(plInt32 x, plInt32 y, plInt32 z, plInt32 w)
{
  PL_CHECK_SIMD_ALIGNMENT(this);

  // Stage the components in an aligned scratch array and load them in one go.
  alignas(16) plInt32 tmp[4] = {x, y, z, w};
  m_v = vld1q_s32(tmp);
}
// static
// Creates a plSimdVec4i with all lanes set to zero.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::MakeZero()
{
  return vmovq_n_s32(0);
}
// Sets all four lanes to the same value.
PL_ALWAYS_INLINE void plSimdVec4i::Set(plInt32 xyzw)
{
  m_v = vmovq_n_s32(xyzw);
}
// Sets the lanes to (x, y, z, w).
PL_ALWAYS_INLINE void plSimdVec4i::Set(plInt32 x, plInt32 y, plInt32 z, plInt32 w)
{
  // Aligned scratch array, then a single full-register load.
  alignas(16) plInt32 tmp[4] = {x, y, z, w};
  m_v = vld1q_s32(tmp);
}
// Sets all lanes to zero.
PL_ALWAYS_INLINE void plSimdVec4i::SetZero()
{
  m_v = vmovq_n_s32(0);
}
// Loads a single int into lane 0; the remaining lanes become zero.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Load<1>(const plInt32* pInts)
{
  m_v = vld1q_lane_s32(pInts, vmovq_n_s32(0), 0);
}
// Loads two ints into lanes 0/1 via one 64-bit lane load; lanes 2/3 become zero.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Load<2>(const plInt32* pInts)
{
  const int64x2_t half = vld1q_lane_s64(reinterpret_cast<const int64_t*>(pInts), vmovq_n_s64(0), 0);
  m_v = vreinterpretq_s32_s64(half);
}
// Loads three ints into lanes 0-2; lane 3 becomes zero.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Load<3>(const plInt32* pInts)
{
  // Low half: first two ints; high half: third int with the last lane zeroed.
  const int32x2_t lo = vld1_s32(pInts);
  const int32x2_t hi = vld1_lane_s32(pInts + 2, vmov_n_s32(0), 0);
  m_v = vcombine_s32(lo, hi);
}
// Loads all four ints.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Load<4>(const plInt32* pInts)
{
  m_v = vld1q_s32(pInts);
}
// Stores lane 0 only.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Store<1>(plInt32* pInts) const
{
  vst1q_lane_s32(pInts, m_v, 0);
}
// Stores lanes 0 and 1 as one 64-bit write.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Store<2>(plInt32* pInts) const
{
  vst1q_lane_s64(reinterpret_cast<int64_t*>(pInts), vreinterpretq_s64_s32(m_v), 0);
}
// Stores lanes 0-2: a 64-bit write for lanes 0/1, then a 32-bit write for lane 2.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Store<3>(plInt32* pInts) const
{
  vst1q_lane_s64(reinterpret_cast<int64_t*>(pInts), vreinterpretq_s64_s32(m_v), 0);
  vst1q_lane_s32(pInts + 2, m_v, 2);
}
// Stores all four lanes.
template <>
PL_ALWAYS_INLINE void plSimdVec4i::Store<4>(plInt32* pInts) const
{
  vst1q_s32(pInts, m_v);
}
// Lane-wise conversion to float.
PL_ALWAYS_INLINE plSimdVec4f plSimdVec4i::ToFloat() const
{
  return vcvtq_f32_s32(m_v);
}
// static
// Lane-wise float-to-int conversion; vcvtq_s32_f32 rounds toward zero (truncation).
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::Truncate(const plSimdVec4f& f)
{
  return vcvtq_s32_f32(f.m_v);
}
// Extracts lane N (N must be a compile-time constant in [0, 3]).
template <int N>
PL_ALWAYS_INLINE plInt32 plSimdVec4i::GetComponent() const
{
  return vgetq_lane_s32(m_v, N);
}
// Per-component accessors, implemented via the lane-extraction template.
PL_ALWAYS_INLINE plInt32 plSimdVec4i::x() const
{
  return GetComponent<0>();
}

PL_ALWAYS_INLINE plInt32 plSimdVec4i::y() const
{
  return GetComponent<1>();
}

PL_ALWAYS_INLINE plInt32 plSimdVec4i::z() const
{
  return GetComponent<2>();
}

PL_ALWAYS_INLINE plInt32 plSimdVec4i::w() const
{
  return GetComponent<3>();
}
// Returns a swizzled copy of this vector, e.g. Get<plSwizzle::XXYY>().
template <plSwizzle::Enum s>
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::Get() const
{
  return __builtin_shufflevector(m_v, m_v, PL_TO_SHUFFLE(s));
}
// Combined swizzle across two vectors:
// x = this[s0], y = this[s1], z = other[s2], w = other[s3]
template <plSwizzle::Enum s>
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::GetCombined(const plSimdVec4i& other) const
{
  return __builtin_shufflevector(m_v, other.m_v, PL_TO_SHUFFLE(s));
}
// Unary minus: negates every lane.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator-() const
{
  return vnegq_s32(m_v);
}
// Lane-wise addition.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator+(const plSimdVec4i& v) const
{
  return vaddq_s32(m_v, v.m_v);
}

// Lane-wise subtraction.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator-(const plSimdVec4i& v) const
{
  return vsubq_s32(m_v, v.m_v);
}

// Lane-wise (component-wise) multiplication.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::CompMul(const plSimdVec4i& v) const
{
  return vmulq_s32(m_v, v.m_v);
}
// Lane-wise (component-wise) division.
// NEON has no integer division instruction, so the lanes are spilled to
// memory, divided in scalar code, and reloaded. Division by zero in any
// lane is UB, exactly as for scalar integer division.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::CompDiv(const plSimdVec4i& v) const
{
  int a[4];
  int b[4];
  int result[4];
  vst1q_s32(a, m_v);
  vst1q_s32(b, v.m_v);

  for (plUInt32 i = 0; i < 4; ++i)
  {
    result[i] = a[i] / b[i];
  }

  return vld1q_s32(result);
}
// Bitwise OR per lane.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator|(const plSimdVec4i& v) const
{
  return vorrq_s32(m_v, v.m_v);
}

// Bitwise AND per lane.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator&(const plSimdVec4i& v) const
{
  return vandq_s32(m_v, v.m_v);
}

// Bitwise XOR per lane.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator^(const plSimdVec4i& v) const
{
  return veorq_s32(m_v, v.m_v);
}
// Bitwise NOT of every lane.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator~() const
{
  return vmvnq_s32(m_v);
}
// Shifts every lane left by uiShift bits.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator<<(plUInt32 uiShift) const
{
  return vshlq_s32(m_v, vmovq_n_s32(uiShift));
}
// Shifts every lane right by uiShift bits (arithmetic shift for s32 lanes).
// NEON only provides "shift left by a signed per-lane amount" (vshlq); a
// negative amount shifts right. Negate in signed arithmetic — negating the
// unsigned parameter first would wrap around and rely on implementation-
// defined narrowing back to a signed value.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator>>(plUInt32 uiShift) const
{
  return vshlq_s32(m_v, vmovq_n_s32(-static_cast<plInt32>(uiShift)));
}
// Per-lane variable left shift: each lane is shifted by the matching lane of v.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator<<(const plSimdVec4i& v) const
{
  return vshlq_s32(m_v, v.m_v);
}

// Per-lane variable right shift, expressed as a left shift by the negated amounts.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::operator>>(const plSimdVec4i& v) const
{
  return vshlq_s32(m_v, vnegq_s32(v.m_v));
}
// Compound arithmetic and bitwise assignment operators; each returns *this.
PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator+=(const plSimdVec4i& v)
{
  m_v = vaddq_s32(m_v, v.m_v);
  return *this;
}

PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator-=(const plSimdVec4i& v)
{
  m_v = vsubq_s32(m_v, v.m_v);
  return *this;
}

PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator|=(const plSimdVec4i& v)
{
  m_v = vorrq_s32(m_v, v.m_v);
  return *this;
}

PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator&=(const plSimdVec4i& v)
{
  m_v = vandq_s32(m_v, v.m_v);
  return *this;
}

PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator^=(const plSimdVec4i& v)
{
  m_v = veorq_s32(m_v, v.m_v);
  return *this;
}
// In-place left shift of every lane by uiShift bits.
PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator<<=(plUInt32 uiShift)
{
  m_v = vshlq_s32(m_v, vmovq_n_s32(uiShift));
  return *this;
}
// In-place arithmetic right shift of every lane by uiShift bits.
// vshlq shifts right when given a negative amount; negate in signed
// arithmetic to avoid unsigned wrap-around followed by implementation-
// defined narrowing.
PL_ALWAYS_INLINE plSimdVec4i& plSimdVec4i::operator>>=(plUInt32 uiShift)
{
  m_v = vshlq_s32(m_v, vmovq_n_s32(-static_cast<plInt32>(uiShift)));
  return *this;
}
// Lane-wise minimum.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::CompMin(const plSimdVec4i& v) const
{
  return vminq_s32(m_v, v.m_v);
}

// Lane-wise maximum.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::CompMax(const plSimdVec4i& v) const
{
  return vmaxq_s32(m_v, v.m_v);
}
// Lane-wise absolute value.
// NOTE(review): vabsq_s32 is non-saturating — presumably INT32_MIN maps to
// itself (two's complement); confirm against the ISA reference if callers rely on it.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::Abs() const
{
  return vabsq_s32(m_v);
}
// Lane-wise comparisons. Each lane of the result is all-ones where the
// comparison holds and all-zeros otherwise.
PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator==(const plSimdVec4i& v) const
{
  return vceqq_s32(m_v, v.m_v);
}

PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator!=(const plSimdVec4i& v) const
{
  // No "compare not equal" instruction: invert the equality mask.
  return vmvnq_u32(vceqq_s32(m_v, v.m_v));
}

PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator<=(const plSimdVec4i& v) const
{
  return vcleq_s32(m_v, v.m_v);
}

PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator<(const plSimdVec4i& v) const
{
  return vcltq_s32(m_v, v.m_v);
}

PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator>=(const plSimdVec4i& v) const
{
  return vcgeq_s32(m_v, v.m_v);
}

PL_ALWAYS_INLINE plSimdVec4b plSimdVec4i::operator>(const plSimdVec4i& v) const
{
  return vcgtq_s32(m_v, v.m_v);
}
// static
// Bitwise select: result lanes take vTrue where the mask lane is set, vFalse otherwise.
PL_ALWAYS_INLINE plSimdVec4i plSimdVec4i::Select(const plSimdVec4b& vCmp, const plSimdVec4i& vTrue, const plSimdVec4i& vFalse)
{
  return vbslq_s32(vCmp.m_v, vTrue.m_v, vFalse.m_v);
}
// static
// Transposes the 4x4 matrix formed by (v0, v1, v2, v3) in place using two
// rounds of zips: after the first round P0/P1 interleave rows 0/2 and 1/3,
// after the second round T0/T1 hold the four transposed rows.
PL_ALWAYS_INLINE void plSimdVec4i::Transpose(plSimdVec4i& v0, plSimdVec4i& v1, plSimdVec4i& v2, plSimdVec4i& v3)
{
  int32x4x2_t P0 = vzipq_s32(v0.m_v, v2.m_v);
  int32x4x2_t P1 = vzipq_s32(v1.m_v, v3.m_v);

  int32x4x2_t T0 = vzipq_s32(P0.val[0], P1.val[0]);
  int32x4x2_t T1 = vzipq_s32(P0.val[1], P1.val[1]);

  v0.m_v = T0.val[0];
  v1.m_v = T0.val[1];
  v2.m_v = T1.val[0];
  v3.m_v = T1.val[1];
}
A 4-component SIMD vector class.
Definition SimdVec4f.h:8
A SIMD 4-component vector class of signed 32b integers.
Definition SimdVec4i.h:9
plSimdVec4i GetCombined(const plSimdVec4i &other) const
x = this[s0], y = this[s1], z = other[s2], w = other[s3]
static plSimdVec4i MakeZero()
Creates an plSimdVec4i that is initialized to zero.
Definition FPUVec4i_inl.h:25