/* i80386 submul_1 -- Multiply a limb vector with a limb and subtract
 *		      the result from a second limb vector.
 *
 *      Copyright (C) 1992, 1994, 1998,
 *                    2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */


#include "sysdep.h"
#include "asm-syntax.h"


/*******************
 * mpi_limb_t
 * _gcry_mpih_submul_1( mpi_ptr_t res_ptr,      (sp + 4)
 *		     mpi_ptr_t s1_ptr,	     (sp + 8)
 *		     mpi_size_t s1_size,     (sp + 12)
 *		     mpi_limb_t s2_limb)     (sp + 16)
 */
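
/* For reference, the operation this routine performs, sketched in C
 * (submul_1_ref is a hypothetical name, not the generic version shipped
 * with Libgcrypt; the sketch assumes 32-bit limbs as on i386): subtract
 * s1_ptr[i] * s2_limb from res_ptr[i] for each of the s1_size limbs and
 * return the final borrow limb.
 *
 *	mpi_limb_t
 *	submul_1_ref (mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 *		      mpi_size_t s1_size, mpi_limb_t s2_limb)
 *	{
 *	  mpi_limb_t cy = 0;
 *	  mpi_size_t i;
 *
 *	  for (i = 0; i < s1_size; i++)
 *	    {
 *	      unsigned long long prod =
 *		(unsigned long long) s1_ptr[i] * s2_limb;
 *	      mpi_limb_t lo = (mpi_limb_t) prod;
 *	      mpi_limb_t hi = (mpi_limb_t) (prod >> 32);
 *	      mpi_limb_t x  = res_ptr[i];
 *
 *	      lo += cy;
 *	      hi += (lo < cy);      // carry from adding the previous limb
 *	      res_ptr[i] = x - lo;
 *	      cy = hi + (x < lo);   // borrow propagates to the next limb
 *	    }
 *	  return cy;
 *	}
 */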

#define res_ptr edi
#define s1_ptr	esi
#define size	ecx
#define s2_limb ebp

	TEXT
	ALIGN (2)
	GLOBL	C_SYMBOL_NAME(_gcry_mpih_submul_1)
C_SYMBOL_NAME(_gcry_mpih_submul_1:)

	CFI_STARTPROC()
	INSN1(push,l	,R(edi))
	CFI_PUSH(%edi)
	INSN1(push,l	,R(esi))
	CFI_PUSH(%esi)
	INSN1(push,l	,R(ebx))
	CFI_PUSH(%ebx)
	INSN1(push,l	,R(ebp))
	CFI_PUSH(%ebp)

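	/* The four pushes above plus the return address occupy 20 bytes,
	   so the arguments listed at sp+4..16 in the prologue comment
	   are now found at esp+20..32.  */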
	INSN2(mov,l	,R(res_ptr),MEM_DISP(esp,20))
	INSN2(mov,l	,R(s1_ptr),MEM_DISP(esp,24))
	INSN2(mov,l	,R(size),MEM_DISP(esp,28))
	INSN2(mov,l	,R(s2_limb),MEM_DISP(esp,32))

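	/* Point res_ptr and s1_ptr just past their last limb and negate
	   size, so the loop below indexes the limbs with a negative
	   counter that counts up to zero.  */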
	INSN2(lea,l	,R(res_ptr),MEM_INDEX(res_ptr,size,4))
	INSN2(lea,l	,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
	INSN1(neg,l	,R(size))
	INSN2(xor,l	,R(ebx),R(ebx))
	ALIGN (2)
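	/* Each iteration multiplies one source limb by s2_limb (64-bit
	   product in edx:eax), adds the carry from the previous iteration,
	   subtracts the low word from the result limb, and folds the
	   resulting borrow into edx, which becomes the next carry in ebx.
	   After the loop, the final carry/borrow limb is returned in eax.  */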
Loop:
	INSN2(mov,l	,R(eax),MEM_INDEX(s1_ptr,size,4))
	INSN1(mul,l	,R(s2_limb))
	INSN2(add,l	,R(eax),R(ebx))
	INSN2(adc,l	,R(edx),$0)
	INSN2(sub,l	,MEM_INDEX(res_ptr,size,4),R(eax))
	INSN2(adc,l	,R(edx),$0)
	INSN2(mov,l	,R(ebx),R(edx))

	INSN1(inc,l	,R(size))
	INSN1(jnz,	,Loop)
	INSN2(mov,l	,R(eax),R(ebx))

	INSN1(pop,l	,R(ebp))
	CFI_POP(%ebp)
	INSN1(pop,l	,R(ebx))
	CFI_POP(%ebx)
	INSN1(pop,l	,R(esi))
	CFI_POP(%esi)
	INSN1(pop,l	,R(edi))
	CFI_POP(%edi)
	ret
	CFI_ENDPROC()