/* { dg-do compile } */
/* { dg-skip-if "incompatible options" { arm*-*-* } { "-march=*" } { "-march=armv7-a" } } */
/* { dg-skip-if "-mpure-code supports M-profile only" { *-*-* } { "-mpure-code" } } */
/* { dg-options "-O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3" } */

struct event {
  unsigned long long id;
  unsigned int flag;
};

void dummy(void)
{
  /* This is here to ensure that the offset of perf_event_id below
     relative to the LANCHOR symbol exceeds the allowed displacement.  */
  static int __warned[300];
  __warned[0] = 1;
}

extern void *kmem_cache_alloc_trace (void *cachep);
extern void *cs_cachep;
extern int nr_cpu_ids;

struct event *
event_alloc (int cpu)
{
  static unsigned long long __attribute__((aligned(8))) perf_event_id;
  struct event *event;
  unsigned long long result;
  unsigned long tmp;

  if (cpu >= nr_cpu_ids)
    return 0;

  event = kmem_cache_alloc_trace (cs_cachep);

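  /* Full barrier before the 64-bit atomic increment, as in the kernel's
     atomic64_add_return() sequence this code is modelled on.  */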
  __asm__ __volatile__ ("dmb" : : : "memory");

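  /* ldrexd/strexd retry loop: %0/%H0 is the 64-bit result, %1 the strexd
     status flag (0 on success), %3 the address of perf_event_id, and
     %4/%H4 the 64-bit increment.  */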
  __asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
  : "=&r" (result), "=&r" (tmp), "+Qo" (perf_event_id)
  : "r" (&perf_event_id), "r" (1LL)
  : "cc");

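  /* Matching barrier after the atomic operation.  */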
  __asm__ __volatile__ ("dmb" : : : "memory");

  event->id = result;

  if (cpu)
    event->flag = 1;

  for (cpu = 0; cpu < nr_cpu_ids; cpu++)
    kmem_cache_alloc_trace (cs_cachep);

  return event;
}