dnl  ARM64 mpn_copyd.

dnl  Copyright 2013 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C	     cycles/limb
C Cortex-A53	 ?
C Cortex-A57	 ?

changecom(blah)

define(`rp', `x0')
define(`up', `x1')
define(`n',  `x2')

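C mpn_copyd(rp, up, n) copies n limbs from up to rp starting with the most
C significant limb, i.e. in decreasing address order, so the operands may
C overlap as long as rp is not below up.
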
ASM_START()
PROLOGUE(mpn_copyd)
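C Point rp and up just past their last limb; everything below copies
C downwards from there.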
	add	rp, rp, n, lsl #3
	add	up, up, n, lsl #3

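C At most 3 limbs: handle them in the tail code.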
	cmp	n, #3
	b.le	L(bc)

C Copy until rp is 128-bit aligned
	tbz	rp, #3, L(al2)
	sub	up, up, #8
	ld1	{v22.1d}, [up]
	sub	n, n, #1
	sub	rp, rp, #8
	st1	{v22.1d}, [rp]

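C Preload two limbs and bias n by 6, so that the sign bit of n serves as the
C loop-exit test and its low two bits count the limbs left for the tail.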
L(al2):	sub	up, up, #16
	ld1	{v26.2d}, [up]
	sub	n, n, #6
	sub	rp, rp, #16			C offset rp for loop
	tbnz	n, #63, L(end)

	sub	up, up, #16			C offset up for loop
	mov	x12, #-16

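C Main loop: copy 4 limbs per iteration, software pipelined around the pair
C preloaded into v26; x12 = -16 post-decrements both pointers.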
	ALIGN(16)
L(top):	ld1	{v22.2d}, [up], x12
	st1	{v26.2d}, [rp], x12
	ld1	{v26.2d}, [up], x12
	st1	{v22.2d}, [rp], x12
	sub	n, n, #4
	tbz	n, #63, L(top)

	add	up, up, #16			C undo up offset

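C Store the pair still pending in v26.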
L(end):	st1	{v26.2d}, [rp]

C Copy last 0-3 limbs.  Note that rp is aligned after the loop, but not when
C we arrive here via L(bc)
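C Bit 1 of n selects a 2-limb copy, bit 0 a final single-limb copy.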
L(bc):	tbz	n, #1, L(tl1)
	sub	up, up, #16
	ld1	{v22.2d}, [up]
	sub	rp, rp, #16
	st1	{v22.2d}, [rp]
L(tl1):	tbz	n, #0, L(tl2)
	sub	up, up, #8
	ld1	{v22.1d}, [up]
	sub	rp, rp, #8
	st1	{v22.1d}, [rp]
L(tl2):	ret
EPILOGUE()