/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

        .file   "atomic.s"

#include <sys/asm_linkage.h>

/*
 * ATOMIC_BO_ENABLE_SHIFT can be selectively defined by processors
 * to enable exponential backoff. No definition means backoff is
 * not desired, i.e. backoff should be disabled.
 * By default, the shift value is used to generate a power of 2
 * value for the backoff limit. In the kernel, processors scale this
 * shift value with the number of online cpus.
 */
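/*
 * For orientation only, a minimal C sketch of the backoff idea the
 * macros below implement (cas_failed() and delay() are hypothetical
 * stand-ins, not interfaces defined in this file):
 *
 *	backoff = 1;
 *	while (cas_failed()) {
 *		delay(backoff);			// low-impact spin
 *		backoff = MIN(backoff * 2,	// exponential growth,
 *		    1 << ATOMIC_BO_ENABLE_SHIFT); // capped at the limit
 *	}
 */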
#if defined(_KERNEL)
        /*
         * Legacy kernel interfaces; they will go away the moment our closed
         * bins no longer require them.
         */
        ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
        ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
        ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
        ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
        ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
        ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
        ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
        ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)

#ifdef ATOMIC_BO_ENABLE_SHIFT

#if !defined(lint)
        .weak   cpu_atomic_delay
        .type   cpu_atomic_delay, #function
#endif  /* lint */

/*
 * For the kernel, invoke a processor specific delay routine to perform
 * a low-impact spin delay. The value of ATOMIC_BO_ENABLE_SHIFT is tuned
 * with respect to the specific spin delay implementation.
 */
#define DELAY_SPIN(label, tmp1, tmp2)                                   \
        /*                                                              ; \
         * Define a pragma weak reference to a cpu specific             ; \
         * delay routine for atomic backoff. For CPUs that              ; \
         * have no such delay routine defined, the delay becomes        ; \
         * just a simple tight loop.                                    ; \
         *                                                              ; \
         * tmp1 = holds CPU specific delay routine                      ; \
         * tmp2 = holds atomic routine's callee return address          ; \
         */                                                             ; \
        sethi   %hi(cpu_atomic_delay), tmp1                             ; \
        or      tmp1, %lo(cpu_atomic_delay), tmp1                       ; \
label##0:                                                               ; \
        brz,pn  tmp1, label##1                                          ; \
        mov     %o7, tmp2                                               ; \
        jmpl    tmp1, %o7       /* call CPU specific delay routine */   ; \
        nop                     /* delay slot : do nothing */           ; \
        mov     tmp2, %o7       /* restore callee's return address */   ; \
label##1:

/*
 * For the kernel, we take cas failures into consideration and also
 * scale the backoff limit w.r.t. the number of cpus.
 * For cas failures, we reset the backoff value to 1 if the cas
 * failures exceed or equal the number of online cpus. This
 * will enforce some degree of fairness and prevent starvation.
 * We also scale/normalize the processor provided specific
 * ATOMIC_BO_ENABLE_SHIFT w.r.t. the number of online cpus to
 * obtain the actual final limit to use.
 */
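/*
 * An approximate C equivalent of the macro below, for illustration
 * only (ncpus_online is the kernel global the macro reads):
 *
 *	if (ncpu == 0)
 *		ncpu = ncpus_online;	// latch cpu count on first use
 *	if (++cas_cnt >= ncpu) {	// too many failures: reset the
 *		cas_cnt = 0;		// backoff for fairness
 *		val = 1;
 *	}
 *	limit = ncpu << ATOMIC_BO_ENABLE_SHIFT;
 */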
#define ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label)            \
        brnz,pt ncpu, label##0                                          ; \
        inc     cas_cnt                                                 ; \
        sethi   %hi(ncpus_online), ncpu                                 ; \
        ld      [ncpu + %lo(ncpus_online)], ncpu                        ; \
label##0:                                                               ; \
        cmp     cas_cnt, ncpu                                           ; \
        blu,pt  %xcc, label##1                                          ; \
        sllx    ncpu, ATOMIC_BO_ENABLE_SHIFT, limit                     ; \
        mov     %g0, cas_cnt                                            ; \
        mov     1, val                                                  ; \
label##1:
#endif  /* ATOMIC_BO_ENABLE_SHIFT */

#else   /* _KERNEL */

/*
 * ATOMIC_BO_ENABLE_SHIFT may be enabled/defined here for generic
 * libc atomics. None for now.
 */
#ifdef ATOMIC_BO_ENABLE_SHIFT
#define DELAY_SPIN(label, tmp1, tmp2)   \
label##0:

#define ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label)    \
        set     1 << ATOMIC_BO_ENABLE_SHIFT, limit
#endif  /* ATOMIC_BO_ENABLE_SHIFT */
#endif  /* _KERNEL */

#ifdef ATOMIC_BO_ENABLE_SHIFT
/*
 * ATOMIC_BACKOFF_INIT macro for initialization.
 * The backoff value, val, is initialized to 1;
 * ncpu is initialized to 0.
 * cas_cnt counts cas instruction failures and is
 * initialized to 0.
 */
#define ATOMIC_BACKOFF_INIT(val, ncpu, cas_cnt) \
        mov     1, val                  ; \
        mov     %g0, ncpu               ; \
        mov     %g0, cas_cnt

#define ATOMIC_BACKOFF_BRANCH(cr, backoff, loop)        \
        bne,a,pn cr, backoff

/*
 * Main ATOMIC_BACKOFF_BACKOFF macro for backoff.
 */
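/*
 * Roughly, in C (a sketch of the macro below, not an interface):
 *
 *	ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt);
 *	val = MIN(val, limit);		// clamp backoff to the limit
 *	for (i = val; i > 0; i--)
 *		DELAY_SPIN();		// low-impact wait
 *	val *= 2;			// exponential growth
 *	goto retlabel;			// retry the atomic operation
 */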
#define ATOMIC_BACKOFF_BACKOFF(val, limit, ncpu, cas_cnt, label, retlabel) \
        ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label##_0)        ; \
        cmp     val, limit                                              ; \
        blu,a,pt %xcc, label##_1                                        ; \
        mov     val, limit                                              ; \
label##_1:                                                              ; \
        mov     limit, val                                              ; \
        DELAY_SPIN(label##_2, %g2, %g3)                                 ; \
        deccc   limit                                                   ; \
        bgu,pn  %xcc, label##_20 /* branch to middle of DELAY_SPIN */   ; \
        nop                                                             ; \
        ba      retlabel                                                ; \
        sllx    val, 1, val

#else   /* ATOMIC_BO_ENABLE_SHIFT */
#define ATOMIC_BACKOFF_INIT(val, ncpu, cas_cnt)

#define ATOMIC_BACKOFF_BRANCH(cr, backoff, loop)        \
        bne,a,pn cr, loop

#define ATOMIC_BACKOFF_BACKOFF(val, limit, ncpu, cas_cnt, label, retlabel)
#endif  /* ATOMIC_BO_ENABLE_SHIFT */

        /*
         * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_8_nv.
         */
        ENTRY(atomic_inc_8)
        ALTENTRY(atomic_inc_8_nv)
        ALTENTRY(atomic_inc_uchar)
        ALTENTRY(atomic_inc_uchar_nv)
        ba      add_8
        add     %g0, 1, %o1
        SET_SIZE(atomic_inc_uchar_nv)
        SET_SIZE(atomic_inc_uchar)
        SET_SIZE(atomic_inc_8_nv)
        SET_SIZE(atomic_inc_8)

        /*
         * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_8_nv.
         */
        ENTRY(atomic_dec_8)
        ALTENTRY(atomic_dec_8_nv)
        ALTENTRY(atomic_dec_uchar)
        ALTENTRY(atomic_dec_uchar_nv)
        ba      add_8
        sub     %g0, 1, %o1
        SET_SIZE(atomic_dec_uchar_nv)
        SET_SIZE(atomic_dec_uchar)
        SET_SIZE(atomic_dec_8_nv)
        SET_SIZE(atomic_dec_8)

        /*
         * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_8_nv.
         */
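        /*
         * SPARC cas operates on words, so the byte routines below
         * emulate an 8-bit atomic with a 32-bit cas on the enclosing
         * word. A loose C sketch of the technique (illustration only;
         * big-endian byte numbering, as in the code):
         *
         *	shift = (3 - (addr & 3)) * 8;	// bit offset of the byte
         *	mask = 0xff << shift;
         *	word = (uint32_t *)(addr & ~3);
         *	do {
         *		old = *word;
         *		new = (old & ~mask) |
         *		    ((old + (delta << shift)) & mask);
         *	} while (cas(word, old, new) != old);
         */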
        ENTRY(atomic_add_8)
        ALTENTRY(atomic_add_8_nv)
        ALTENTRY(atomic_add_char)
        ALTENTRY(atomic_add_char_nv)
add_8:
        and     %o0, 0x3, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x3, %g1           ! %g1 = byte offset, right-to-left
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        set     0xff, %o3               ! %o3 = mask
        sll     %o3, %g1, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single byte value
        andn    %o0, 0x3, %o0           ! %o0 = word address
        ld      [%o0], %o2              ! read old value
1:
        add     %o2, %o1, %o5           ! add value to the old value
        and     %o5, %o3, %o5           ! clear other bits
        andn    %o2, %o3, %o4           ! clear target bits
        or      %o4, %o5, %o5           ! insert the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        add     %o2, %o1, %o5
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_add_char_nv)
        SET_SIZE(atomic_add_char)
        SET_SIZE(atomic_add_8_nv)
        SET_SIZE(atomic_add_8)

        /*
         * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_16_nv.
         */
        ENTRY(atomic_inc_16)
        ALTENTRY(atomic_inc_16_nv)
        ALTENTRY(atomic_inc_ushort)
        ALTENTRY(atomic_inc_ushort_nv)
        ba      add_16
        add     %g0, 1, %o1
        SET_SIZE(atomic_inc_ushort_nv)
        SET_SIZE(atomic_inc_ushort)
        SET_SIZE(atomic_inc_16_nv)
        SET_SIZE(atomic_inc_16)

        /*
         * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_16_nv.
         */
        ENTRY(atomic_dec_16)
        ALTENTRY(atomic_dec_16_nv)
        ALTENTRY(atomic_dec_ushort)
        ALTENTRY(atomic_dec_ushort_nv)
        ba      add_16
        sub     %g0, 1, %o1
        SET_SIZE(atomic_dec_ushort_nv)
        SET_SIZE(atomic_dec_ushort)
        SET_SIZE(atomic_dec_16_nv)
        SET_SIZE(atomic_dec_16)

        /*
         * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_16_nv.
         */
        ENTRY(atomic_add_16)
        ALTENTRY(atomic_add_16_nv)
        ALTENTRY(atomic_add_short)
        ALTENTRY(atomic_add_short_nv)
add_16:
        and     %o0, 0x2, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x2, %g1           ! %g1 = byte offset, right-to-left
        sll     %o4, 3, %o4             ! %o4 = bit offset, left-to-right
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        sethi   %hi(0xffff0000), %o3    ! %o3 = mask
        srl     %o3, %o4, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single short value
        andn    %o0, 0x2, %o0           ! %o0 = word address
        ! if low-order bit is 1, we will properly get an alignment fault here
        ld      [%o0], %o2              ! read old value
1:
        add     %o1, %o2, %o5           ! add value to the old value
        and     %o5, %o3, %o5           ! clear other bits
        andn    %o2, %o3, %o4           ! clear target bits
        or      %o4, %o5, %o5           ! insert the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        add     %o1, %o2, %o5
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_add_short_nv)
        SET_SIZE(atomic_add_short)
        SET_SIZE(atomic_add_16_nv)
        SET_SIZE(atomic_add_16)

        /*
         * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_32_nv.
         */
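        /*
         * The word and doubleword routines that follow all share one
         * cas loop. A schematic C version, for illustration only
         * (OP is add/or/and/swap as appropriate):
         *
         *	old = *addr;
         *	for (;;) {
         *		new = OP(old, value);
         *		observed = cas(addr, old, new);
         *		if (observed == old)
         *			return (new);	// the _nv variants
         *		old = observed;		// lost the race; possibly
         *	}				// back off, then retry
         */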
        ENTRY(atomic_inc_32)
        ALTENTRY(atomic_inc_32_nv)
        ALTENTRY(atomic_inc_uint)
        ALTENTRY(atomic_inc_uint_nv)
        ba      add_32
        add     %g0, 1, %o1
        SET_SIZE(atomic_inc_uint_nv)
        SET_SIZE(atomic_inc_uint)
        SET_SIZE(atomic_inc_32_nv)
        SET_SIZE(atomic_inc_32)

        /*
         * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_32_nv.
         */
        ENTRY(atomic_dec_32)
        ALTENTRY(atomic_dec_32_nv)
        ALTENTRY(atomic_dec_uint)
        ALTENTRY(atomic_dec_uint_nv)
        ba      add_32
        sub     %g0, 1, %o1
        SET_SIZE(atomic_dec_uint_nv)
        SET_SIZE(atomic_dec_uint)
        SET_SIZE(atomic_dec_32_nv)
        SET_SIZE(atomic_dec_32)

        /*
         * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_32_nv.
         */
        ENTRY(atomic_add_32)
        ALTENTRY(atomic_add_32_nv)
        ALTENTRY(atomic_add_int)
        ALTENTRY(atomic_add_int_nv)
add_32:
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ld      [%o0], %o2
1:
        add     %o2, %o1, %o3
        cas     [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
        mov     %o3, %o2
        retl
        add     %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, add32, 0b)
        SET_SIZE(atomic_add_int_nv)
        SET_SIZE(atomic_add_int)
        SET_SIZE(atomic_add_32_nv)
        SET_SIZE(atomic_add_32)

        /*
         * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_64_nv.
         */
        ENTRY(atomic_inc_64)
        ALTENTRY(atomic_inc_64_nv)
        ALTENTRY(atomic_inc_ulong)
        ALTENTRY(atomic_inc_ulong_nv)
        ba      add_64
        add     %g0, 1, %o1
        SET_SIZE(atomic_inc_ulong_nv)
        SET_SIZE(atomic_inc_ulong)
        SET_SIZE(atomic_inc_64_nv)
        SET_SIZE(atomic_inc_64)

        /*
         * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_64_nv.
         */
        ENTRY(atomic_dec_64)
        ALTENTRY(atomic_dec_64_nv)
        ALTENTRY(atomic_dec_ulong)
        ALTENTRY(atomic_dec_ulong_nv)
        ba      add_64
        sub     %g0, 1, %o1
        SET_SIZE(atomic_dec_ulong_nv)
        SET_SIZE(atomic_dec_ulong)
        SET_SIZE(atomic_dec_64_nv)
        SET_SIZE(atomic_dec_64)

        /*
         * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_64_nv.
         */
        ENTRY(atomic_add_64)
        ALTENTRY(atomic_add_64_nv)
        ALTENTRY(atomic_add_ptr)
        ALTENTRY(atomic_add_ptr_nv)
        ALTENTRY(atomic_add_long)
        ALTENTRY(atomic_add_long_nv)
add_64:
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ldx     [%o0], %o2
1:
        add     %o2, %o1, %o3
        casx    [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
        mov     %o3, %o2
        retl
        add     %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, add64, 0b)
        SET_SIZE(atomic_add_long_nv)
        SET_SIZE(atomic_add_long)
        SET_SIZE(atomic_add_ptr_nv)
        SET_SIZE(atomic_add_ptr)
        SET_SIZE(atomic_add_64_nv)
        SET_SIZE(atomic_add_64)

        /*
         * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_8_nv.
         */
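        /*
         * Unlike add_8, the or loop below needs no masking of the
         * neighboring bytes in the loop itself: the value was masked
         * to a single byte up front, and or-ing zeros into the other
         * bytes leaves them unchanged. In C terms (a sketch):
         * new = old | (value << shift).
         */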
        ENTRY(atomic_or_8)
        ALTENTRY(atomic_or_8_nv)
        ALTENTRY(atomic_or_uchar)
        ALTENTRY(atomic_or_uchar_nv)
        and     %o0, 0x3, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x3, %g1           ! %g1 = byte offset, right-to-left
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        set     0xff, %o3               ! %o3 = mask
        sll     %o3, %g1, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single byte value
        andn    %o0, 0x3, %o0           ! %o0 = word address
        ld      [%o0], %o2              ! read old value
1:
        or      %o2, %o1, %o5           ! or in the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        or      %o2, %o1, %o5
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_or_uchar_nv)
        SET_SIZE(atomic_or_uchar)
        SET_SIZE(atomic_or_8_nv)
        SET_SIZE(atomic_or_8)

        /*
         * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_16_nv.
         */
        ENTRY(atomic_or_16)
        ALTENTRY(atomic_or_16_nv)
        ALTENTRY(atomic_or_ushort)
        ALTENTRY(atomic_or_ushort_nv)
        and     %o0, 0x2, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x2, %g1           ! %g1 = byte offset, right-to-left
        sll     %o4, 3, %o4             ! %o4 = bit offset, left-to-right
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        sethi   %hi(0xffff0000), %o3    ! %o3 = mask
        srl     %o3, %o4, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single short value
        andn    %o0, 0x2, %o0           ! %o0 = word address
        ! if low-order bit is 1, we will properly get an alignment fault here
        ld      [%o0], %o2              ! read old value
1:
        or      %o2, %o1, %o5           ! or in the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        or      %o2, %o1, %o5           ! or in the new value
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_or_ushort_nv)
        SET_SIZE(atomic_or_ushort)
        SET_SIZE(atomic_or_16_nv)
        SET_SIZE(atomic_or_16)

        /*
         * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_32_nv.
         */
        ENTRY(atomic_or_32)
        ALTENTRY(atomic_or_32_nv)
        ALTENTRY(atomic_or_uint)
        ALTENTRY(atomic_or_uint_nv)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ld      [%o0], %o2
1:
        or      %o2, %o1, %o3
        cas     [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
        mov     %o3, %o2
        retl
        or      %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, or32, 0b)
        SET_SIZE(atomic_or_uint_nv)
        SET_SIZE(atomic_or_uint)
        SET_SIZE(atomic_or_32_nv)
        SET_SIZE(atomic_or_32)

        /*
         * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_64_nv.
         */
        ENTRY(atomic_or_64)
        ALTENTRY(atomic_or_64_nv)
        ALTENTRY(atomic_or_ulong)
        ALTENTRY(atomic_or_ulong_nv)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ldx     [%o0], %o2
1:
        or      %o2, %o1, %o3
        casx    [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
        mov     %o3, %o2
        retl
        or      %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, or64, 0b)
        SET_SIZE(atomic_or_ulong_nv)
        SET_SIZE(atomic_or_ulong)
        SET_SIZE(atomic_or_64_nv)
        SET_SIZE(atomic_or_64)

        /*
         * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_8_nv.
         */
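        /*
         * The and routines below use orn to set every bit outside the
         * target byte/halfword to 1, so, as with or, the neighboring
         * bytes pass through the and untouched. In C terms (a sketch):
         * new = old & ((value << shift) | ~mask).
         */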
        ENTRY(atomic_and_8)
        ALTENTRY(atomic_and_8_nv)
        ALTENTRY(atomic_and_uchar)
        ALTENTRY(atomic_and_uchar_nv)
        and     %o0, 0x3, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x3, %g1           ! %g1 = byte offset, right-to-left
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        set     0xff, %o3               ! %o3 = mask
        sll     %o3, %g1, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        orn     %o1, %o3, %o1           ! all ones in other bytes
        andn    %o0, 0x3, %o0           ! %o0 = word address
        ld      [%o0], %o2              ! read old value
1:
        and     %o2, %o1, %o5           ! and in the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        and     %o2, %o1, %o5
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_and_uchar_nv)
        SET_SIZE(atomic_and_uchar)
        SET_SIZE(atomic_and_8_nv)
        SET_SIZE(atomic_and_8)

        /*
         * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_16_nv.
         */
        ENTRY(atomic_and_16)
        ALTENTRY(atomic_and_16_nv)
        ALTENTRY(atomic_and_ushort)
        ALTENTRY(atomic_and_ushort_nv)
        and     %o0, 0x2, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x2, %g1           ! %g1 = byte offset, right-to-left
        sll     %o4, 3, %o4             ! %o4 = bit offset, left-to-right
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        sethi   %hi(0xffff0000), %o3    ! %o3 = mask
        srl     %o3, %o4, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        orn     %o1, %o3, %o1           ! all ones in the other half
        andn    %o0, 0x2, %o0           ! %o0 = word address
        ! if low-order bit is 1, we will properly get an alignment fault here
        ld      [%o0], %o2              ! read old value
1:
        and     %o2, %o1, %o5           ! and in the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        and     %o2, %o1, %o5
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = new value
        SET_SIZE(atomic_and_ushort_nv)
        SET_SIZE(atomic_and_ushort)
        SET_SIZE(atomic_and_16_nv)
        SET_SIZE(atomic_and_16)

        /*
         * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_32_nv.
         */
        ENTRY(atomic_and_32)
        ALTENTRY(atomic_and_32_nv)
        ALTENTRY(atomic_and_uint)
        ALTENTRY(atomic_and_uint_nv)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ld      [%o0], %o2
1:
        and     %o2, %o1, %o3
        cas     [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
        mov     %o3, %o2
        retl
        and     %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, and32, 0b)
        SET_SIZE(atomic_and_uint_nv)
        SET_SIZE(atomic_and_uint)
        SET_SIZE(atomic_and_32_nv)
        SET_SIZE(atomic_and_32)

        /*
         * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
         * separated, you need to also edit the libc sparcv9 platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_64_nv.
         */
        ENTRY(atomic_and_64)
        ALTENTRY(atomic_and_64_nv)
        ALTENTRY(atomic_and_ulong)
        ALTENTRY(atomic_and_ulong_nv)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ldx     [%o0], %o2
1:
        and     %o2, %o1, %o3
        casx    [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
        mov     %o3, %o2
        retl
        and     %o2, %o1, %o0           ! return new value
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, and64, 0b)
        SET_SIZE(atomic_and_ulong_nv)
        SET_SIZE(atomic_and_ulong)
        SET_SIZE(atomic_and_64_nv)
        SET_SIZE(atomic_and_64)
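        /*
         * atomic_cas_8 below emulates a byte compare-and-swap with a
         * word cas. The subtle part: the word cas can fail because an
         * unrelated byte in the word changed even though the target
         * byte still holds the comparison value; in that case we must
         * retry with the freshly observed word ("should we have
         * succeeded?") rather than report failure. Only when the
         * target byte itself differs do we return the observed old
         * value. atomic_cas_16 uses the same scheme on halfwords.
         */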
        ENTRY(atomic_cas_8)
        ALTENTRY(atomic_cas_uchar)
        and     %o0, 0x3, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x3, %g1           ! %g1 = byte offset, right-to-left
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        set     0xff, %o3               ! %o3 = mask
        sll     %o3, %g1, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single byte value
        sll     %o2, %g1, %o2           ! %o2 = shifted to bit offset
        and     %o2, %o3, %o2           ! %o2 = single byte value
        andn    %o0, 0x3, %o0           ! %o0 = word address
        ld      [%o0], %o4              ! read old value
1:
        andn    %o4, %o3, %o4           ! clear target bits
        or      %o4, %o2, %o5           ! insert the new value
        or      %o4, %o1, %o4           ! insert the comparison value
        cas     [%o0], %o4, %o5
        cmp     %o4, %o5                ! did we succeed?
        be,pt   %icc, 2f
        and     %o5, %o3, %o4           ! isolate the old value
        cmp     %o1, %o4                ! should we have succeeded?
        be,a,pt %icc, 1b                ! yes, try again
        mov     %o5, %o4                ! %o4 = old value
2:
        retl
        srl     %o4, %g1, %o0           ! %o0 = old value
        SET_SIZE(atomic_cas_uchar)
        SET_SIZE(atomic_cas_8)

        ENTRY(atomic_cas_16)
        ALTENTRY(atomic_cas_ushort)
        and     %o0, 0x2, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x2, %g1           ! %g1 = byte offset, right-to-left
        sll     %o4, 3, %o4             ! %o4 = bit offset, left-to-right
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        sethi   %hi(0xffff0000), %o3    ! %o3 = mask
        srl     %o3, %o4, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single short value
        sll     %o2, %g1, %o2           ! %o2 = shifted to bit offset
        and     %o2, %o3, %o2           ! %o2 = single short value
        andn    %o0, 0x2, %o0           ! %o0 = word address
        ! if low-order bit is 1, we will properly get an alignment fault here
        ld      [%o0], %o4              ! read old value
1:
        andn    %o4, %o3, %o4           ! clear target bits
        or      %o4, %o2, %o5           ! insert the new value
        or      %o4, %o1, %o4           ! insert the comparison value
        cas     [%o0], %o4, %o5
        cmp     %o4, %o5                ! did we succeed?
        be,pt   %icc, 2f
        and     %o5, %o3, %o4           ! isolate the old value
        cmp     %o1, %o4                ! should we have succeeded?
        be,a,pt %icc, 1b                ! yes, try again
        mov     %o5, %o4                ! %o4 = old value
2:
        retl
        srl     %o4, %g1, %o0           ! %o0 = old value
        SET_SIZE(atomic_cas_ushort)
        SET_SIZE(atomic_cas_16)
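        /*
         * At word size and above no emulation is needed: cas/casx are
         * the native instructions. Semantically (a C sketch): perform
         * { old = *addr; if (old == cmp) *addr = new; } atomically,
         * and return old.
         */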
        ENTRY(atomic_cas_32)
        ALTENTRY(atomic_cas_uint)
        cas     [%o0], %o1, %o2
        retl
        mov     %o2, %o0
        SET_SIZE(atomic_cas_uint)
        SET_SIZE(atomic_cas_32)

        ENTRY(atomic_cas_64)
        ALTENTRY(atomic_cas_ptr)
        ALTENTRY(atomic_cas_ulong)
        casx    [%o0], %o1, %o2
        retl
        mov     %o2, %o0
        SET_SIZE(atomic_cas_ulong)
        SET_SIZE(atomic_cas_ptr)
        SET_SIZE(atomic_cas_64)

        ENTRY(atomic_swap_8)
        ALTENTRY(atomic_swap_uchar)
        and     %o0, 0x3, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x3, %g1           ! %g1 = byte offset, right-to-left
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        set     0xff, %o3               ! %o3 = mask
        sll     %o3, %g1, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single byte value
        andn    %o0, 0x3, %o0           ! %o0 = word address
        ld      [%o0], %o2              ! read old value
1:
        andn    %o2, %o3, %o5           ! clear target bits
        or      %o5, %o1, %o5           ! insert the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = old value
        SET_SIZE(atomic_swap_uchar)
        SET_SIZE(atomic_swap_8)

        ENTRY(atomic_swap_16)
        ALTENTRY(atomic_swap_ushort)
        and     %o0, 0x2, %o4           ! %o4 = byte offset, left-to-right
        xor     %o4, 0x2, %g1           ! %g1 = byte offset, right-to-left
        sll     %o4, 3, %o4             ! %o4 = bit offset, left-to-right
        sll     %g1, 3, %g1             ! %g1 = bit offset, right-to-left
        sethi   %hi(0xffff0000), %o3    ! %o3 = mask
        srl     %o3, %o4, %o3           ! %o3 = shifted to bit offset
        sll     %o1, %g1, %o1           ! %o1 = shifted to bit offset
        and     %o1, %o3, %o1           ! %o1 = single short value
        andn    %o0, 0x2, %o0           ! %o0 = word address
        ! if low-order bit is 1, we will properly get an alignment fault here
        ld      [%o0], %o2              ! read old value
1:
        andn    %o2, %o3, %o5           ! clear target bits
        or      %o5, %o1, %o5           ! insert the new value
        cas     [%o0], %o2, %o5
        cmp     %o2, %o5
        bne,a,pn %icc, 1b
        mov     %o5, %o2                ! %o2 = old value
        and     %o5, %o3, %o5
        retl
        srl     %o5, %g1, %o0           ! %o0 = old value
        SET_SIZE(atomic_swap_ushort)
        SET_SIZE(atomic_swap_16)

        ENTRY(atomic_swap_32)
        ALTENTRY(atomic_swap_uint)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ld      [%o0], %o2
1:
        mov     %o1, %o3
        cas     [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
        mov     %o3, %o2
        retl
        mov     %o3, %o0
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, swap32, 0b)
        SET_SIZE(atomic_swap_uint)
        SET_SIZE(atomic_swap_32)

        ENTRY(atomic_swap_64)
        ALTENTRY(atomic_swap_ptr)
        ALTENTRY(atomic_swap_ulong)
        ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
0:
        ldx     [%o0], %o2
1:
        mov     %o1, %o3
        casx    [%o0], %o2, %o3
        cmp     %o2, %o3
        ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
        mov     %o3, %o2
        retl
        mov     %o3, %o0
2:
        ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, swap64, 0b)
        SET_SIZE(atomic_swap_ulong)
        SET_SIZE(atomic_swap_ptr)
        SET_SIZE(atomic_swap_64)
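        /*
         * atomic_set_long_excl/atomic_clear_long_excl below atomically
         * test-and-set (or test-and-clear) a single bit, failing if it
         * is already in the requested state. Roughly, in C (a sketch
         * of the set case; not the interface contract):
         *
         *	mask = 1UL << bit;
         *	do {
         *		old = *addr;
         *		if (old & mask)		// already set?
         *			return (-1);	// then fail
         *	} while (cas(addr, old, old | mask) != old);
         *	return (0);
         */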
        ENTRY(atomic_set_long_excl)
        ATOMIC_BACKOFF_INIT(%o5, %g4, %g5)
        mov     1, %o3
        slln    %o3, %o1, %o3
0:
        ldn     [%o0], %o2
1:
        andcc   %o2, %o3, %g0           ! test if the bit is set
        bnz,a,pn %ncc, 2f               ! if so, then fail out
        mov     -1, %o0
        or      %o2, %o3, %o4           ! set the bit, and try to commit it
        casn    [%o0], %o2, %o4
        cmp     %o2, %o4
        ATOMIC_BACKOFF_BRANCH(%ncc, 5f, 1b)
        mov     %o4, %o2
        mov     %g0, %o0
2:
        retl
        nop
5:
        ATOMIC_BACKOFF_BACKOFF(%o5, %g1, %g4, %g5, setlongexcl, 0b)
        SET_SIZE(atomic_set_long_excl)

        ENTRY(atomic_clear_long_excl)
        ATOMIC_BACKOFF_INIT(%o5, %g4, %g5)
        mov     1, %o3
        slln    %o3, %o1, %o3
0:
        ldn     [%o0], %o2
1:
        andncc  %o3, %o2, %g0           ! test if the bit is clear
        bnz,a,pn %ncc, 2f               ! if so, then fail out
        mov     -1, %o0
        andn    %o2, %o3, %o4           ! clear the bit, and try to commit it
        casn    [%o0], %o2, %o4
        cmp     %o2, %o4
        ATOMIC_BACKOFF_BRANCH(%ncc, 5f, 1b)
        mov     %o4, %o2
        mov     %g0, %o0
2:
        retl
        nop
5:
        ATOMIC_BACKOFF_BACKOFF(%o5, %g1, %g4, %g5, clrlongexcl, 0b)
        SET_SIZE(atomic_clear_long_excl)
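        /*
         * Usage reminder for the userland membar routines below (an
         * illustrative producer/consumer pairing, not code from this
         * file):
         *
         *	producer:			consumer:
         *		data = v;			while (!ready)
         *		membar_producer();			;
         *		ready = 1;			membar_consumer();
         *						use(data);
         */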
#if !defined(_KERNEL)

        /*
         * Spitfires and Blackbirds have a problem with membars in the
         * delay slot (SF_ERRATA_51). For safety's sake, we assume
         * that the whole world needs the workaround.
         */
        ENTRY(membar_enter)
        membar  #StoreLoad|#StoreStore
        retl
        nop
        SET_SIZE(membar_enter)

        ENTRY(membar_exit)
        membar  #LoadStore|#StoreStore
        retl
        nop
        SET_SIZE(membar_exit)

        ENTRY(membar_producer)
        membar  #StoreStore
        retl
        nop
        SET_SIZE(membar_producer)

        ENTRY(membar_consumer)
        membar  #LoadLoad
        retl
        nop
        SET_SIZE(membar_consumer)

#endif  /* !_KERNEL */