/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why it has
 * to be callable from assembly. It can also be called by a primary
 * CPU from C code during its boot.
 */
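
/*
 * For reference, the C-callable entry points in this file can be
 * declared on the C side roughly as follows (an illustrative sketch,
 * not copied from the kernel headers):
 *
 *	void ll_add_cpu_to_smp_group(void);
 *	int  ll_enable_coherency(void);
 *	void ll_disable_coherency(void);
 *
 * ll_enable_coherency() places 0 in r0 before returning, hence the
 * int return type in this sketch. ll_get_coherency_base() and
 * ll_get_coherency_cpumask() return their results in r1 and r3
 * respectively, so they are only usable from assembly.
 */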

#include <linux/linkage.h>
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
	tst	r1, #CR_M		@ check whether the MMU is enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address (or 0x0 if the coherency fabric is not mapped).
	 */
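	/*
	 * The literal at 3: (end of this file) holds the link-time
	 * offset from itself to coherency_phys_base. Adding that
	 * offset to the run-time address of 3: gives the location of
	 * coherency_phys_base independently of where the kernel was
	 * loaded, and the second load below fetches its value.
	 */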
	adr	r1, 3f
	ldr	r3, [r1]
	ldr	r1, [r1, r3]
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)

/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers.
 */
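
/*
 * For example, on CPU 1 the code below computes
 * (1 << 24) << 1 = 0x02000000, i.e. the mask has bit (24 + CPU ID)
 * set for the current CPU.
 */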
ENTRY(ll_get_coherency_cpumask)
	mrc	p15, 0, r3, cr0, cr0, 5	@ read MPIDR
	and	r3, r3, #15		@ keep the CPU ID
	mov	r2, #(1 << 24)
	lsl	r3, r2, r3		@ r3 = (1 << 24) << CPU ID
ARM_BE8(rev	r3, r3)
	ret	lr
ENDPROC(ll_get_coherency_cpumask)

/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the ldrex/strex instructions even though
 * they can be called with the MMU disabled. The Armada XP SoC has an
 * exclusive monitor that tracks transactions to Device and/or SO
 * memory, so exclusive transactions remain functional even when the
 * MMU is disabled.
 */
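
/*
 * All three functions below use the same read-modify-write sequence:
 * load the fabric register with ldrex, set or clear this CPU's mask
 * bit, store it back with strex, and retry until the store-exclusive
 * status returned in r1 is 0 (success).
 */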

ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read the fabric configuration register
	orr	r2, r2, r3		@ set this CPU's bit
	strex	r1, r2, [r0]		@ try to write it back
	cmp	r1, #0
	bne	1b			@ retry if the exclusive store failed
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)

ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read the fabric control register
	orr	r2, r2, r3		@ set this CPU's bit
	strex	r1, r2, [r0]		@ try to write it back
	cmp	r1, #0
	bne	1b			@ retry if the exclusive store failed
	dsb
	mov	r0, #0			@ return 0 (success)
	ret	lr
ENDPROC(ll_enable_coherency)

ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read the fabric control register
	bic	r2, r2, r3		@ clear this CPU's bit
	strex	r1, r2, [r0]		@ try to write it back
	cmp	r1, #0
	bne	1b			@ retry if the exclusive store failed
	dsb
	ret	lr
ENDPROC(ll_disable_coherency)

	/*
	 * Link-time offset from this literal to coherency_phys_base,
	 * used by ll_get_coherency_base() when the MMU is off.
	 */
	.align 2
3:
	.long	coherency_phys_base - .
162