/* { dg-do compile } */
/* { dg-skip-if "incompatible options" { arm*-*-* } { "-march=*" } { "-march=armv7-a" } } */
/* { dg-options "-O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3" } */
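/* Increment a 64-bit static counter with an ldrexd/strexd loop while
   -fno-omit-frame-pointer keeps the frame pointer live and the array
   in dummy pushes the counter's offset from its section anchor past
   the allowed displacement.  */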
struct event {
  unsigned long long id;
  unsigned int flag;
};

void dummy(void)
{
  /* This is here to ensure that the offset of perf_event_id below
     relative to the LANCHOR symbol exceeds the allowed displacement.  */
  static int __warned[300];
  __warned[0] = 1;
}

extern void *kmem_cache_alloc_trace (void *cachep);
extern void *cs_cachep;
extern int nr_cpu_ids;

struct event *
event_alloc (int cpu)
{
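  /* ldrexd/strexd require a doubleword-aligned address, hence the
     explicit aligned(8).  */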
  static unsigned long long __attribute__((aligned(8))) perf_event_id;
  struct event *event;
  unsigned long long result;
  unsigned long tmp;

  if (cpu >= nr_cpu_ids)
    return 0;

  event = kmem_cache_alloc_trace (cs_cachep);
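  /* Full memory barrier before the atomic update.  */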
  __asm__ __volatile__ ("dmb" : : : "memory");
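  /* Atomic 64-bit add: load-exclusive the doubleword, add the 64-bit
     constant 1 with carry into the high word, then store-exclusive and
     loop until the store succeeds (%1 reads back as zero).  %H0 and %H4
     name the second register of each 64-bit pair (the high word here).  */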
  __asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
  : "=&r" (result), "=&r" (tmp), "+Qo" (perf_event_id)
  : "r" (&perf_event_id), "r" (1LL)
  : "cc");
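  /* Full memory barrier after the atomic update.  */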
  __asm__ __volatile__ ("dmb" : : : "memory");

  event->id = result;

  if (cpu)
    event->flag = 1;

  for (cpu = 0; cpu < nr_cpu_ids; cpu++)
    kmem_cache_alloc_trace (cs_cachep);

  return event;
}