#define _GNU_SOURCE 1

#include <assert.h>
#include <stdlib.h>
#include <signal.h>
#include <endian.h>

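/*
 * 32 bytes of known data, 16-byte aligned.  p points 15 bytes in, so any
 * access through it wider than one byte is misaligned.  The pointer is
 * volatile so the compiler cannot see the misalignment and optimize
 * (or warn about) the access at compile time.
 */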
char x[32] __attribute__((aligned(16))) = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
    0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
    0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
};
void * volatile p = (void *)&x + 15;

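/*
 * Expected landing point: the misaligned access in main() should raise
 * SIGBUS.  Check the siginfo_t the kernel delivered and report success.
 */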
void sigbus(int sig, siginfo_t *info, void *uc)
{
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}

int main()
{
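    /*
     * Install the handler with SA_SIGINFO so it receives a siginfo_t
     * carrying si_code and the faulting address in si_addr.
     */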
    struct sigaction sa = {
        .sa_sigaction = sigbus,
        .sa_flags = SA_SIGINFO
    };
    int allow_fail = 0;
    int tmp;

    tmp = sigaction(SIGBUS, &sa, NULL);
    assert(tmp == 0);

    /*
     * Select an operation that's likely to enforce alignment.
     * On many guests that support unaligned accesses by default,
     * this is often an atomic operation.
     */
#if defined(__aarch64__)
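    /* AArch64: 32-bit load-exclusive (LDXR); requires an aligned address. */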
    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__alpha__)
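    /* Alpha: load-locked longword (ldl_l). */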
    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__arm__)
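    /* Arm: load-exclusive word (LDREX); requires an aligned address. */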
    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__powerpc__)
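    /* PowerPC: load word and reserve indexed (lwarx). */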
    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__riscv_atomic)
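    /* RISC-V: load-reserved word (lr.w); requires an aligned address. */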
    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
#else
    /* No insn known to fault unaligned -- try for a straight load. */
    allow_fail = 1;
    tmp = *(volatile int *)p;
#endif

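    /*
     * Only the plain-load fallback may legitimately reach this point
     * without a SIGBUS; the exclusive/locked loads above are expected
     * to fault on the misaligned address.
     */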
    assert(allow_fail);

    /*
     * We didn't see a signal.
     * We might as well validate the unaligned load worked.
     */
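    /* The load read bytes x[15..18], i.e. 0x10 0x11 0x12 0x13 in memory. */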
    if (BYTE_ORDER == LITTLE_ENDIAN) {
        assert(tmp == 0x13121110);
    } else {
        assert(tmp == 0x10111213);
    }
    return EXIT_SUCCESS;
}