xref: /netbsd/sys/arch/sparc64/include/elf_support.h (revision a0c0dc61)
1*a0c0dc61Sjoerg /*	$NetBSD: elf_support.h,v 1.1 2018/03/29 13:23:40 joerg Exp $	*/
2*a0c0dc61Sjoerg 
3*a0c0dc61Sjoerg /*-
4*a0c0dc61Sjoerg  * Copyright (c) 2000 Eduardo Horvath.
5*a0c0dc61Sjoerg  * Copyright (c) 2018 The NetBSD Foundation, Inc.
6*a0c0dc61Sjoerg  * All rights reserved.
7*a0c0dc61Sjoerg  *
8*a0c0dc61Sjoerg  * Redistribution and use in source and binary forms, with or without
9*a0c0dc61Sjoerg  * modification, are permitted provided that the following conditions
10*a0c0dc61Sjoerg  * are met:
11*a0c0dc61Sjoerg  * 1. Redistributions of source code must retain the above copyright
12*a0c0dc61Sjoerg  *    notice, this list of conditions and the following disclaimer.
13*a0c0dc61Sjoerg  * 2. Redistributions in binary form must reproduce the above copyright
14*a0c0dc61Sjoerg  *    notice, this list of conditions and the following disclaimer in the
15*a0c0dc61Sjoerg  *    documentation and/or other materials provided with the distribution.
16*a0c0dc61Sjoerg  *
17*a0c0dc61Sjoerg  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18*a0c0dc61Sjoerg  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19*a0c0dc61Sjoerg  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20*a0c0dc61Sjoerg  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21*a0c0dc61Sjoerg  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22*a0c0dc61Sjoerg  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23*a0c0dc61Sjoerg  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24*a0c0dc61Sjoerg  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25*a0c0dc61Sjoerg  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26*a0c0dc61Sjoerg  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27*a0c0dc61Sjoerg  * POSSIBILITY OF SUCH DAMAGE.
28*a0c0dc61Sjoerg  */
29*a0c0dc61Sjoerg #ifndef _SPARC64_ELF_SUPPORT_H
30*a0c0dc61Sjoerg #define _SPARC64_ELF_SUPPORT_H
31*a0c0dc61Sjoerg 
32*a0c0dc61Sjoerg #ifdef __arch64__
33*a0c0dc61Sjoerg /*
34*a0c0dc61Sjoerg  * Create a jump to the location `target` starting at `where`.
35*a0c0dc61Sjoerg  * This requires up to 6 instructions.
36*a0c0dc61Sjoerg  * The first instruction is written last as it replaces a branch
37*a0c0dc61Sjoerg  * in the PLT during lazy binding.
38*a0c0dc61Sjoerg  * The resulting code can trash %g1 and %g5.
39*a0c0dc61Sjoerg  */
static inline void
sparc_write_branch(void *where_, void *target)
{
	/*
	 * Instruction templates; the immediate fields (disp22/disp30,
	 * imm22, simm13) are OR-ed in below.  %g1/%g5 are the registers
	 * the PLT scratch convention allows us to trash.
	 */
	const unsigned int BAA     = 0x30800000U; /* ba,a  (offset / 4) */
	const unsigned int SETHI   = 0x03000000U; /* sethi %hi(0), %g1 */
	const unsigned int JMP     = 0x81c06000U; /* jmpl  %g1+%lo(0), %g0 */
	const unsigned int OR      = 0x82106000U; /* or    %g1, 0, %g1 */
	const unsigned int XOR     = 0x82186000U; /* xor   %g1, 0, %g1 */
	const unsigned int MOV71   = 0x8213e000U; /* or    %o7, 0, %g1 */
	const unsigned int MOV17   = 0x9e106000U; /* or    %g1, 0, %o7 */
	const unsigned int CALL    = 0x40000000U; /* call  0 */
	const unsigned int SLLX    = 0x83287000U; /* sllx  %g1, 0, %g1 */
	const unsigned int NEG     = 0x82200001U; /* neg   %g1 */
	const unsigned int SETHIG5 = 0x0b000000U; /* sethi %hi(0), %g5 */
	const unsigned int ORG5    = 0x82104005U; /* or    %g1, %g5, %g1 */

	unsigned int *where = (unsigned int *)where_;
	unsigned long value = (unsigned long)target;
	unsigned long offset = value - (unsigned long)where;

#define	HIVAL(v, s)	(((v) >> (s)) & 0x003fffffU)
#define	LOVAL(v, s)	(((v) >> (s)) & 0x000003ffU)
	if (offset + 0x800000 <= 0xfffffc) {
		/*
		 * Displacement is within +-8MB: the signed disp22 of ba,a
		 * covers byte offsets [-0x800000, 0x7ffffc], so accept the
		 * whole range, not just backward branches.
		 * (The previous bound 0x7ffffc rejected all forward
		 * offsets and forced them onto the longer encodings.)
		 */
		where[0] = BAA | ((offset >> 2) & 0x3fffff);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value <= 0xffffffffUL) {
		/*
		 * The absolute address is a 32bit value.
		 * This can be encoded as:
		 *	sethi	%hi(value), %g1
		 *	jmp	%g1+%lo(value)
		 * sethi zero-extends on V9, so bits 63-32 end up clear.
		 * The jmp is written and flushed before the sethi replaces
		 * word 0, keeping the stub consistent at every step.
		 */
		where[1] = JMP   | LOVAL(value, 0);
		__asm volatile("iflush %0+4" : : "r" (where));
		where[0] = SETHI | HIVAL(value, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value >= 0xffffffff00000000UL) {
		/*
		 * The top 32bit address range can be encoded as:
		 *	sethi	%hix(addr), %g1
		 *	xor	%g1, %lox(addr), %g1
		 *	jmp	%g1
		 * %hix is %hi of the complement; %lox sets bits 12-10 so
		 * the simm13 sign-extension of the xor fills bits 63-32
		 * with ones.
		 */
		where[2] = JMP;
		where[1] = XOR | (value & 0x000003ff) | 0x1c00;
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		where[0] = SETHI | HIVAL(~value, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (offset - 4 + 0x80000000UL <= 0xfffffffcUL) {
		/*
		 * The call is the second instruction, at where + 4, so its
		 * byte displacement to the target is offset - 4; usable iff
		 * that fits the signed 32bit range [-0x80000000, 0x7ffffffc]
		 * of disp30.  Encoding:
		 *	mov	%o7, %g1
		 *	call	(value - .)
		 *	 mov	%g1, %o7
		 * (The previous code encoded -(offset + 4), i.e. the
		 * negated displacement, sending the call to 2*where - value.)
		 */
		where[1] = CALL | (((offset - 4) >> 2) & 0x3fffffffU);
		where[2] = MOV17;
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		where[0] = MOV71;
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value < 0x100000000000UL) {
		/*
		 * The absolute address is a 44bit value.
		 * This can be encoded as:
		 *	sethi	%h44(addr), %g1
		 *	or	%g1, %m44(addr), %g1
		 *	sllx	%g1, 12, %g1
		 *	jmp	%g1+%l44(addr)
		 * Per the V9 ABI: %h44 = bits 43-22, %m44 = bits 21-12
		 * (10 bits, so the or's simm13 never sign-extends),
		 * %l44 = bits 11-0 (12 bits).  The previous masks
		 * (0x1fff for %m44, 0x3ff for %l44) smeared the or's
		 * sign bit when bit 24 of addr was set and dropped
		 * addr bits 11-10 in the jmp.
		 */
		where[1] = OR    | ((value >> 12) & 0x000003ffU);
		where[2] = SLLX  | 12;
		where[3] = JMP   | (value & 0x0fff);
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		__asm volatile("iflush %0+12" : : "r" (where));
		where[0] = SETHI | HIVAL(value, 22);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value >= 0xfffff00000001000UL) {
		/*
		 * The top 44bit address range can be encoded as:
		 *	sethi	%hi(neg), %g1
		 *	or	%g1, %lo(neg), %g1
		 *	neg	%g1
		 *	sllx	%g1, 12, %g1
		 *	jmp	%g1+(addr&0x0fff)
		 * where neg = (-(addr & ~0xfff)) >> 12, i.e. the two's
		 * complement of the page-aligned address pre-shifted so it
		 * fits in the 32 bits sethi+or can build.  Computing neg
		 * from the aligned address (instead of (-addr)>>12 with a
		 * +1 fixup) is exact for 4KB-aligned targets and cannot
		 * lose the fixup carry.  The entry bound excludes the first
		 * 4KB above 0xfffff00000000000, where neg would need 33
		 * bits; those targets use the general case below.
		 */
		unsigned long neg = (0UL - (value & ~0xfffUL)) >> 12;
		where[1] = OR    | LOVAL(neg, 0);
		where[2] = NEG;
		where[3] = SLLX  | 12;
		where[4] = JMP   | (value & 0x0fff);
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		__asm volatile("iflush %0+12" : : "r" (where));
		__asm volatile("iflush %0+16" : : "r" (where));
		where[0] = SETHI | HIVAL(neg, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	/*
	 * The general case of a 64bit address is encoded as:
	 *	sethi	%hh(addr), %g1
	 *	sethi	%lm(addr), %g5
	 *	or	%g1, %hm(addr), %g1
	 *	sllx	%g1, 32, %g1
	 *	or	%g1, %g5, %g1
	 *	jmp	%g1+%lo(addr)
	 * This covers any 64bit value: %hh/%hm build the high word in %g1,
	 * %lm/%lo the low word via %g5 and the jmp immediate.
	 */
	where[1] = SETHIG5 | HIVAL(value, 10);
	where[2] = OR      | LOVAL(value, 32);
	where[3] = SLLX    | 32;
	where[4] = ORG5;
	where[5] = JMP     | LOVAL(value, 0);
	__asm volatile("iflush %0+4" : : "r" (where));
	__asm volatile("iflush %0+8" : : "r" (where));
	__asm volatile("iflush %0+12" : : "r" (where));
	__asm volatile("iflush %0+16" : : "r" (where));
	__asm volatile("iflush %0+20" : : "r" (where));
	where[0] = SETHI   | HIVAL(value, 42);
	__asm volatile("iflush %0+0" : : "r" (where));
#undef	HIVAL
#undef	LOVAL
}
183*a0c0dc61Sjoerg #else
184*a0c0dc61Sjoerg #include <sparc/elf_support.h>
185*a0c0dc61Sjoerg #endif
186*a0c0dc61Sjoerg #endif
187