// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!purego

#include "textflag.h"

// This implementation of Poly1305 uses the vector facility (vx)
// to process up to 2 blocks (32 bytes) per iteration using an
// algorithm based on the one described in:
//
//   NEON crypto, Daniel J. Bernstein & Peter Schwabe
//   https://cryptojedi.org/papers/neoncrypto-20120320.pdf
//
// This algorithm uses 5 26-bit limbs to represent a 130-bit
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).
//
// In order to parallelise the operations required to calculate
// the sum we use two separate accumulators and then sum those
// in an extra final step. For compatibility with the generic
// implementation we perform this summation at the end of every
// updateVX call.
//
// To use two accumulators we must multiply the message blocks
// by r² rather than r. Only the final message block should be
// multiplied by r.
//
// Example:
//
// We want to calculate the sum (h) for a 64 byte message (m):
//
//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
//
// To do this we split the calculation into the even indices
// and odd indices of the message. These form our SIMD 'lanes':
//
//   h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
//       m[16:32]r³ + m[48:64]r      <- lane 1
//
// To calculate this iteratively we refactor so that both lanes
// are written in terms of r² and r:
//
//   h = (m[ 0:16]r² + m[32:48])r² +   <- lane 0
//       (m[16:32]r² + m[48:64])r      <- lane 1
//                 ^              ^
//                 |              coefficients for second iteration
//                 coefficients for first iteration
//
// So in this case we would have two iterations. In the first
// both lanes are multiplied by r². In the second only the
// first lane is multiplied by r² and the second lane is
// instead multiplied by r. This gives us the odd and even
// powers of r that we need from the original equation.
//
// Notation:
//
//   h - accumulator
//   r - key
//   m - message
//
//   [a, b]       - SIMD register holding two 64-bit values
//   [a, b, c, d] - SIMD register holding four 32-bit values
//   xᵢ[n]        - limb n of variable x with bit width i
//
// Limbs are expressed in little endian order, so for 26-bit
// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
// will be the least significant limb.
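
// As a cross-check of the lane refactoring above, here is a
// minimal Go sketch (illustrative only, not part of this file's
// build) that evaluates a 4-block sum both ways using math/big;
// the key value and block values are arbitrary stand-ins:
//
//	package main
//
//	import (
//		"fmt"
//		"math/big"
//	)
//
//	func main() {
//		p := new(big.Int).Lsh(big.NewInt(1), 130)
//		p.Sub(p, big.NewInt(5)) // p = 2¹³⁰ - 5
//		mul := func(x, y *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(x, y), p) }
//		add := func(x, y *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(x, y), p) }
//
//		r := big.NewInt(12345) // stand-in for the clamped key
//		m := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4)}
//
//		// direct evaluation: h = m[0]r⁴ + m[1]r³ + m[2]r² + m[3]r
//		direct := big.NewInt(0)
//		for _, b := range m {
//			direct = mul(add(direct, b), r)
//		}
//
//		// two-lane evaluation: both lanes use r² in the first
//		// iteration; the final iteration uses r² and r.
//		r2 := mul(r, r)
//		lane0 := mul(add(mul(m[0], r2), m[2]), r2)
//		lane1 := mul(add(mul(m[1], r2), m[3]), r)
//		fmt.Println(direct.Cmp(add(lane0, lane1)) == 0) // prints true
//	}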

// masking constants
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits

// expansion constants (see EXPAND macro)
#define EX0 V2
#define EX1 V3
#define EX2 V4

// key (r², r or 1 depending on context)
#define R_0 V5
#define R_1 V6
#define R_2 V7
#define R_3 V8
#define R_4 V9

// precalculated coefficients (5r², 5r or 0 depending on context)
#define R5_1 V10
#define R5_2 V11
#define R5_3 V12
#define R5_4 V13

// message block (m)
#define M_0 V14
#define M_1 V15
#define M_2 V16
#define M_3 V17
#define M_4 V18

// accumulator (h)
#define H_0 V19
#define H_1 V20
#define H_2 V21
#define H_3 V22
#define H_4 V23

// temporary registers (for short-lived values)
#define T_0 V24
#define T_1 V25
#define T_2 V26
#define T_3 V27
#define T_4 V28

GLOBL ·constants<>(SB), RODATA, $0x30
// EX0
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
// EX1
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
// EX2
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d

// MULTIPLY multiplies each lane of f and g, partially reduced
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
// in each lane that need to be reduced further to produce the
// final result.
//
//   h₁₃⁰ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
//
// Note that the multiplication by 5 of the high bits is
// achieved by precalculating the multiplication of four of the
// g coefficients by 5. These are g51-g54.
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
	VMLOF  f0, g0, h0        \
	VMLOF  f0, g3, h3        \
	VMLOF  f0, g1, h1        \
	VMLOF  f0, g4, h4        \
	VMLOF  f0, g2, h2        \
	VMLOF  f1, g54, T_0      \
	VMLOF  f1, g2, T_3       \
	VMLOF  f1, g0, T_1       \
	VMLOF  f1, g3, T_4       \
	VMLOF  f1, g1, T_2       \
	VMALOF f2, g53, h0, h0   \
	VMALOF f2, g1, h3, h3    \
	VMALOF f2, g54, h1, h1   \
	VMALOF f2, g2, h4, h4    \
	VMALOF f2, g0, h2, h2    \
	VMALOF f3, g52, T_0, T_0 \
	VMALOF f3, g0, T_3, T_3  \
	VMALOF f3, g53, T_1, T_1 \
	VMALOF f3, g1, T_4, T_4  \
	VMALOF f3, g54, T_2, T_2 \
	VMALOF f4, g51, h0, h0   \
	VMALOF f4, g54, h3, h3   \
	VMALOF f4, g52, h1, h1   \
	VMALOF f4, g0, h4, h4    \
	VMALOF f4, g53, h2, h2   \
	VAG    T_0, h0, h0       \
	VAG    T_3, h3, h3       \
	VAG    T_1, h1, h1       \
	VAG    T_4, h4, h4       \
	VAG    T_2, h2, h2
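
// For reference, the per-lane arithmetic performed by MULTIPLY
// corresponds to the Go sketch below (illustrative only; the
// function name is hypothetical). Cross terms f[i]g[j] with
// i+j >= 5 carry weight 2¹³⁰ or more and are folded back in
// multiplied by 5, which is where the precomputed g51-g54
// (i.e. 5g₂₆[1]..5g₂₆[4]) coefficients come in:
//
//	// mul5x5 multiplies two values in 5x26-bit limb form,
//	// partially reduced modulo 2¹³⁰ - 5 (limbs are not carried).
//	func mul5x5(f, g [5]uint64) (h [5]uint64) {
//		g5 := [5]uint64{0, 5 * g[1], 5 * g[2], 5 * g[3], 5 * g[4]}
//		h[0] = f[0]*g[0] + f[1]*g5[4] + f[2]*g5[3] + f[3]*g5[2] + f[4]*g5[1]
//		h[1] = f[0]*g[1] + f[1]*g[0] + f[2]*g5[4] + f[3]*g5[3] + f[4]*g5[2]
//		h[2] = f[0]*g[2] + f[1]*g[1] + f[2]*g[0] + f[3]*g5[4] + f[4]*g5[3]
//		h[3] = f[0]*g[3] + f[1]*g[2] + f[2]*g[1] + f[3]*g[0] + f[4]*g5[4]
//		h[4] = f[0]*g[4] + f[1]*g[3] + f[2]*g[2] + f[3]*g[1] + f[4]*g[0]
//		return h
//	}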

// REDUCE performs the following carry operations in four
// stages, as specified in Bernstein & Schwabe:
//
//   1: h₂₆[0]->h₂₆[1]  h₂₆[3]->h₂₆[4]
//   2: h₂₆[1]->h₂₆[2]  h₂₆[4]->h₂₆[0]
//   3: h₂₆[0]->h₂₆[1]  h₂₆[2]->h₂₆[3]
//   4: h₂₆[3]->h₂₆[4]
//
// The result is that all of the limbs are limited to 26-bits
// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
//
// Note that although each limb is aligned at 26-bit intervals
// they may contain values that exceed 2²⁶ - 1, hence the need
// to carry the excess bits in each limb.
#define REDUCE(h0, h1, h2, h3, h4) \
	VESRLG $26, h0, T_0  \
	VESRLG $26, h3, T_1  \
	VN     MOD26, h0, h0 \
	VN     MOD26, h3, h3 \
	VAG    T_0, h1, h1   \
	VAG    T_1, h4, h4   \
	VESRLG $26, h1, T_2  \
	VESRLG $26, h4, T_3  \
	VN     MOD26, h1, h1 \
	VN     MOD26, h4, h4 \
	VESLG  $2, T_3, T_4  \
	VAG    T_3, T_4, T_4 \
	VAG    T_2, h2, h2   \
	VAG    T_4, h0, h0   \
	VESRLG $26, h2, T_0  \
	VESRLG $26, h0, T_1  \
	VN     MOD26, h2, h2 \
	VN     MOD26, h0, h0 \
	VAG    T_0, h3, h3   \
	VAG    T_1, h1, h1   \
	VESRLG $26, h3, T_2  \
	VN     MOD26, h3, h3 \
	VAG    T_2, h4, h4

// EXPAND splits the 128-bit little-endian values in0 and in1
// into 26-bit big-endian limbs and places the results into
// the first and second lane of d₂₆[0:4] respectively.
//
// The EX0, EX1 and EX2 constants are arrays of byte indices
// for permutation. The permutation both reverses the bytes
// in the input and ensures the bytes are copied into the
// destination limb ready to be shifted into their final
// position.
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
	VPERM  in0, in1, EX0, d0 \
	VPERM  in0, in1, EX1, d2 \
	VPERM  in0, in1, EX2, d4 \
	VESRLG $26, d0, d1       \
	VESRLG $30, d2, d3       \
	VESRLG $4, d2, d2        \
	VN     MOD26, d0, d0     \ // [in0₂₆[0], in1₂₆[0]]
	VN     MOD26, d3, d3     \ // [in0₂₆[3], in1₂₆[3]]
	VN     MOD26, d1, d1     \ // [in0₂₆[1], in1₂₆[1]]
	VN     MOD24, d4, d4     \ // [in0₂₆[4], in1₂₆[4]]
	VN     MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]

// func updateVX(state *macState, msg []byte)
TEXT ·updateVX(SB), NOSPLIT, $0
	MOVD state+0(FP), R1
	LMG  msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len

	// load EX0, EX1 and EX2
	MOVD $·constants<>(SB), R5
	VLM  (R5), EX0, EX2

	// generate masks
	VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
	VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]

	// load h (accumulator) and r (key) from state
	VZERO T_1               // [0, 0]
	VL    0(R1), T_0        // [h₆₄[0], h₆₄[1]]
	VLEG  $0, 16(R1), T_1   // [h₆₄[2], 0]
	VL    24(R1), T_2       // [r₆₄[0], r₆₄[1]]
	VPDI  $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
	VPDI  $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]

	// unpack h and r into 26-bit limbs
	// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
	VN     MOD26, T_3, H_0            // [h₂₆[0], r₂₆[0]]
	VZERO  H_1                        // [0, 0]
	VZERO  H_3                        // [0, 0]
	VGMG   $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
	VESLG  $24, T_1, T_1              // [h₆₄[2]<<24, 0]
	VERIMG $-26&63, T_3, MOD26, H_1   // [h₂₆[1], r₂₆[1]]
	VESRLG $+52&63, T_3, H_2          // [h₂₆[2], r₂₆[2]] - low 12 bits only
	VERIMG $-14&63, T_4, MOD26, H_3   // [h₂₆[3], r₂₆[3]]
	VESRLG $40, T_4, H_4              // [h₂₆[4], r₂₆[4]] - low 24 bits only
	VERIMG $+12&63, T_4, T_0, H_2     // [h₂₆[2], r₂₆[2]] - complete
	VO     T_1, H_4, H_4              // [h₂₆[4], r₂₆[4]] - complete

	// replicate r across all 4 vector elements
	VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
	VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
	VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
	VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
	VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]

	// zero out lane 1 of h
	VLEIG $1, $0, H_0 // [h₂₆[0], 0]
	VLEIG $1, $0, H_1 // [h₂₆[1], 0]
	VLEIG $1, $0, H_2 // [h₂₆[2], 0]
	VLEIG $1, $0, H_3 // [h₂₆[3], 0]
	VLEIG $1, $0, H_4 // [h₂₆[4], 0]
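
	// The unpack sequence above (and the EXPAND macro) implement
	// the following change of bit layout; a minimal Go sketch for
	// a single value, assuming h is given as little-endian 64-bit
	// words with h2 holding only the top bits:
	//
	//	// split26 splits a 130-bit value into 5 26-bit limbs.
	//	func split26(h0, h1, h2 uint64) (l [5]uint64) {
	//		const mask26 = (1 << 26) - 1
	//		l[0] = h0 & mask26
	//		l[1] = (h0 >> 26) & mask26
	//		l[2] = (h0>>52 | h1<<12) & mask26 // low 12 bits from h0, high 14 from h1
	//		l[3] = (h1 >> 14) & mask26
	//		l[4] = h1>>40 | h2<<24 // low 24 bits from h1, top bits from h2
	//		return l
	//	}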

	// calculate 5r (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]

	// skip r² calculation if we are only calculating one block
	CMPBLE R3, $16, skip

	// calculate r²
	MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
	REDUCE(M_0, M_1, M_2, M_3, M_4)
	VGBM   $0x0f0f, T_0
	VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
	VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
	VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
	VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
	VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]

	// calculate 5r² (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]

loop:
	CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients

	// load next 2 blocks from message
	VLM (R2), T_0, T_1

	// update message slice
	SUB  $32, R3
	MOVD $32(R2), R2

	// unpack message blocks into 26-bit big-endian limbs
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// add 2¹²⁸ to each message block value
	VLEIB $4, $1, M_4
	VLEIB $12, $1, M_4

multiply:
	// accumulate the incoming message
	VAG H_0, M_0, M_0
	VAG H_3, M_3, M_3
	VAG H_1, M_1, M_1
	VAG H_4, M_4, M_4
	VAG H_2, M_2, M_2

	// multiply the accumulator by the key coefficient
	MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)

	// carry and partially reduce the partial products
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	CMPBNE R3, $0, loop

finish:
	// sum lane 0 and lane 1 and put the result in lane 1
	VZERO  T_0
	VSUMQG H_0, T_0, H_0
	VSUMQG H_3, T_0, H_3
	VSUMQG H_1, T_0, H_1
	VSUMQG H_4, T_0, H_4
	VSUMQG H_2, T_0, H_2

	// reduce again after summation
	// TODO(mundaym): there might be a more efficient way to do this
	// now that we only have 1 active lane. For example, we could
	// simultaneously pack the values as we reduce them.
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
	// TODO(mundaym): in testing this final carry was unnecessary.
	// Needs a proof before it can be removed though.
	VESRLG $26, H_1, T_1
	VN     MOD26, H_1, H_1
	VAQ    T_1, H_2, H_2
	VESRLG $26, H_2, T_2
	VN     MOD26, H_2, H_2
	VAQ    T_2, H_3, H_3
	VESRLG $26, H_3, T_3
	VN     MOD26, H_3, H_3
	VAQ    T_3, H_4, H_4
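
	// The carry logic of REDUCE (used twice above) and the 5x fold
	// of the 2¹³⁰ overflow can be written scalar-wise as the Go
	// sketch below (illustrative only; the function name is
	// hypothetical):
	//
	//	// reduce carries each limb in the four stages documented
	//	// at the REDUCE macro; the carry out of l[4] wraps around
	//	// to l[0] multiplied by 5 since 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5).
	//	func reduce(l *[5]uint64) {
	//		const mask26 = (1 << 26) - 1
	//		carry := func(lo, hi int) {
	//			l[hi] += l[lo] >> 26
	//			l[lo] &= mask26
	//		}
	//		carry(0, 1) // stage 1
	//		carry(3, 4)
	//		carry(1, 2) // stage 2
	//		l[0] += 5 * (l[4] >> 26) // the VESLG $2 + VAG pair computes the x5
	//		l[4] &= mask26
	//		carry(0, 1) // stage 3
	//		carry(2, 3)
	//		carry(3, 4) // stage 4
	//	}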

	// h is now < 2(2¹³⁰ - 5)
	// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
	VESLG $26, H_1, H_1
	VESLG $26, H_3, H_3
	VO    H_0, H_1, H_0
	VO    H_2, H_3, H_2
	VESLG $4, H_2, H_2
	VLEIB $7, $48, H_1
	VSLB  H_1, H_2, H_2
	VO    H_0, H_2, H_0
	VLEIB $7, $104, H_1
	VSLB  H_1, H_4, H_3
	VO    H_3, H_0, H_0
	VLEIB $7, $24, H_1
	VSRLB H_1, H_4, H_1

	// update state
	VSTEG $1, H_0, 0(R1)
	VSTEG $0, H_0, 8(R1)
	VSTEG $1, H_1, 16(R1)
	RET

b2: // 2 or fewer blocks remaining
	CMPBLE R3, $16, b1

	// Load the 2 remaining blocks (17-32 bytes remaining).
	MOVD $-17(R3), R0    // index of final byte to load modulo 16
	VL   (R2), T_0       // load full 16 byte block
	VLL  R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes

	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	MOVD   $-16(R3), R3   // index of byte in last block to insert 1 at (could be 16)
	CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
	VLVGB  R3, R0, T_1    // insert 1 into the byte at index R3

	// Split both blocks into 26-bit limbs in the appropriate lanes.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the second to last block.
	VLEIB $4, $1, M_4

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $12, $1, M_4

	// Finally, set up the coefficients for the final multiplication.
	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r², so it can be kept the
	// same. We want lane 1 to be multiplied by r, so we need to move
	// the saved r value into the 32-bit odd index in lane 1 by
	// rotating the 64-bit lane by 32.
	VGBM   $0x00ff, T_0         // [0, 0xffffffffffffffff] - mask lane 1 only
	VERIMG $32, R_0, T_0, R_0   // [_, r²₂₆[0], _, r₂₆[0]]
	VERIMG $32, R_1, T_0, R_1   // [_, r²₂₆[1], _, r₂₆[1]]
	VERIMG $32, R_2, T_0, R_2   // [_, r²₂₆[2], _, r₂₆[2]]
	VERIMG $32, R_3, T_0, R_3   // [_, r²₂₆[3], _, r₂₆[3]]
	VERIMG $32, R_4, T_0, R_4   // [_, r²₂₆[4], _, r₂₆[4]]
	VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
	VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
	VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
	VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]

	MOVD $0, R3
	BR   multiply

skip:
	CMPBEQ R3, $0, finish
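
	// The pack-and-store sequence above is the inverse of the limb
	// split; a minimal Go sketch of the net bit layout (shifts that
	// overflow a 64-bit word wrap in Go, with the lost high bits
	// landing in the next word):
	//
	//	// pack26 packs 5 26-bit limbs (l[4] may be wider) back
	//	// into little-endian 64-bit words.
	//	func pack26(l [5]uint64) (h0, h1, h2 uint64) {
	//		h0 = l[0] | l[1]<<26 | l[2]<<52
	//		h1 = l[2]>>12 | l[3]<<14 | l[4]<<40
	//		h2 = l[4] >> 24
	//		return
	//	}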

b1: // 1 block remaining

	// Load the final block (1-16 bytes). This will be placed into
	// lane 0.
	MOVD $-1(R3), R0
	VLL  R0, (R2), T_0 // pad to 16 bytes with zeros

	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	CMPBEQ R3, $16, 2(PC)
	VLVGB  R3, R0, T_0

	// Set the message block in lane 1 to the value 0 so that it
	// can be accumulated without affecting the final result.
	VZERO T_1

	// Split the final message block into 26-bit limbs in lane 0.
	// Lane 1 will contain 0.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $4, $1, M_4

	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r, so we need to move the
	// saved r value into the 32-bit odd index in lane 0. We want
	// lane 1 to be set to the value 1. This makes multiplication
	// a no-op. We do this by setting lane 1 in every register to 0
	// and then just setting the 32-bit index 3 in R_0 to 1.
	VZERO T_0
	MOVD  $0, R0
	MOVD  $0x10111213, R12
	VLVGP R12, R0, T_1         // [_, 0x10111213, _, 0x00000000]
	VPERM T_0, R_0, T_1, R_0   // [_, r₂₆[0], _, 0]
	VPERM T_0, R_1, T_1, R_1   // [_, r₂₆[1], _, 0]
	VPERM T_0, R_2, T_1, R_2   // [_, r₂₆[2], _, 0]
	VPERM T_0, R_3, T_1, R_3   // [_, r₂₆[3], _, 0]
	VPERM T_0, R_4, T_1, R_4   // [_, r₂₆[4], _, 0]
	VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
	VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
	VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
	VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]

	// Set the value of lane 1 to be 1.
	VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]

	MOVD $0, R3
	BR   multiply
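
// For reference, the final-block handling in b1 and b2 above
// follows the Go sketch below (illustrative only; the helper is
// hypothetical): a partial block gets its 1 byte written into the
// buffer before limb expansion, while a full 16-byte block instead
// has the 2¹²⁸ bit set in limb 4 (1<<24, since limb 4 starts at
// bit 104) after expansion.
//
//	// padBlock zero-pads a final block to 16 bytes and reports
//	// whether the caller must still set the 2¹²⁸ bit after
//	// expansion (i.e. the block was a full 16 bytes).
//	func padBlock(block []byte) (buf [16]byte, full bool) {
//		n := copy(buf[:], block)
//		if n < 16 {
//			buf[n] = 1 // the appended 1 bit for a partial block
//		}
//		return buf, n == 16
//	}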