/*	$NetBSD: ctlreg.h,v 1.54 2010/12/18 05:45:43 mrg Exp $	*/

/*
 * Copyright (c) 1996-2002 Eduardo Horvath
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _SPARC_CTLREG_H_
#define _SPARC_CTLREG_H_

/*
 * Sun 4u control registers. (includes address space definitions
 * and some registers in control space).
 */

/*
 * The Alternate address spaces.
 *
 * 0x00-0x7f are privileged
 * 0x80-0xff can be used by users
 */

#define	ASI_LITTLE	0x08		/* This bit should make an ASI little endian */

#define	ASI_NUCLEUS			0x04	/* [4u] kernel address space */
#define	ASI_NUCLEUS_LITTLE		0x0c	/* [4u] kernel address space, little endian */

#define	ASI_AS_IF_USER_PRIMARY		0x10	/* [4u] primary user address space */
#define	ASI_AS_IF_USER_SECONDARY	0x11	/* [4u] secondary user address space */

#define	ASI_PHYS_CACHED			0x14	/* [4u] MMU bypass to main memory */
#define	ASI_PHYS_NON_CACHED		0x15	/* [4u] MMU bypass to I/O location */

#define	ASI_AS_IF_USER_PRIMARY_LITTLE	0x18	/* [4u] primary user address space, little endian */
#define	ASI_AS_IF_USER_SECONDARY_LITTLE	0x19	/* [4u] secondary user address space, little endian */

#define	ASI_PHYS_CACHED_LITTLE		0x1c	/* [4u] MMU bypass to main memory, little endian */
#define	ASI_PHYS_NON_CACHED_LITTLE	0x1d	/* [4u] MMU bypass to I/O location, little endian */
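
/*
 * Note: each little-endian ASI above is the corresponding big-endian ASI
 * with ASI_LITTLE ORed in, e.g.
 * ASI_PHYS_CACHED | ASI_LITTLE == 0x14 | 0x08 == 0x1c == ASI_PHYS_CACHED_LITTLE.
 */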

#define	ASI_NUCLEUS_QUAD_LDD		0x24	/* [4u] use w/LDDA to load 128-bit item */
#define	ASI_NUCLEUS_QUAD_LDD_LITTLE	0x2c	/* [4u] use w/LDDA to load 128-bit item, little endian */

#define	ASI_FLUSH_D_PAGE_PRIMARY	0x38	/* [4u] flush D-cache page using primary context */
#define	ASI_FLUSH_D_PAGE_SECONDARY	0x39	/* [4u] flush D-cache page using secondary context */
#define	ASI_FLUSH_D_CTX_PRIMARY		0x3a	/* [4u] flush D-cache context using primary context */
#define	ASI_FLUSH_D_CTX_SECONDARY	0x3b	/* [4u] flush D-cache context using secondary context */

#define	ASI_DCACHE_INVALIDATE		0x42	/* [III] invalidate D-cache */
#define	ASI_DCACHE_UTAG			0x43	/* [III] diagnostic access to D-cache micro tag */
#define	ASI_DCACHE_SNOOP_TAG		0x44	/* [III] diagnostic access to D-cache snoop tag RAM */

#define	ASI_LSU_CONTROL_REGISTER	0x45	/* [4u] load/store unit control register */

#define	ASI_DCACHE_DATA			0x46	/* [4u] diagnostic access to D-cache data RAM */
#define	ASI_DCACHE_TAG			0x47	/* [4u] diagnostic access to D-cache tag RAM */

#define	ASI_INTR_DISPATCH_STATUS	0x48	/* [4u] interrupt dispatch status register */
#define	ASI_INTR_RECEIVE		0x49	/* [4u] interrupt receive status register */
#define	ASI_MID_REG			0x4a	/* [4u] hardware config and MID */
#define	ASI_ERROR_EN_REG		0x4b	/* [4u] asynchronous error enables */
#define	ASI_AFSR			0x4c	/* [4u] asynchronous fault status register */
#define	ASI_AFAR			0x4d	/* [4u] asynchronous fault address register */

#define	ASI_ICACHE_DATA			0x66	/* [4u] diagnostic access to I-cache data RAM */
#define	ASI_ICACHE_TAG			0x67	/* [4u] diagnostic access to I-cache tag RAM */
#define	ASI_FLUSH_I_PAGE_PRIMARY	0x68	/* [4u] flush I-cache page using primary context */
#define	ASI_FLUSH_I_PAGE_SECONDARY	0x69	/* [4u] flush I-cache page using secondary context */
#define	ASI_FLUSH_I_CTX_PRIMARY		0x6a	/* [4u] flush I-cache context using primary context */
#define	ASI_FLUSH_I_CTX_SECONDARY	0x6b	/* [4u] flush I-cache context using secondary context */

#define	ASI_BLOCK_AS_IF_USER_PRIMARY	0x70	/* [4u] primary user address space, block loads/stores */
#define	ASI_BLOCK_AS_IF_USER_SECONDARY	0x71	/* [4u] secondary user address space, block loads/stores */

#define	ASI_ECACHE_DIAG			0x76	/* [4u] diag access to E-cache tag and data */
#define	ASI_DATAPATH_ERR_REG_WRITE	0x77	/* [4u] write access to datapath error registers (ASI reused) */

#define	ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE	0x78	/* [4u] primary user address space, block loads/stores, little endian */
#define	ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE	0x79	/* [4u] secondary user address space, block loads/stores, little endian */

#define	ASI_INTERRUPT_RECEIVE_DATA	0x7f	/* [4u] interrupt receive data registers {0,1,2} */
#define	ASI_DATAPATH_ERR_REG_READ	0x7f	/* [4u] read access to datapath error registers (ASI reused) */

#define	ASI_PRIMARY			0x80	/* [4u] primary address space */
#define	ASI_SECONDARY			0x81	/* [4u] secondary address space */
#define	ASI_PRIMARY_NOFAULT		0x82	/* [4u] primary address space, no fault */
#define	ASI_SECONDARY_NOFAULT		0x83	/* [4u] secondary address space, no fault */

#define	ASI_PRIMARY_LITTLE		0x88	/* [4u] primary address space, little endian */
#define	ASI_SECONDARY_LITTLE		0x89	/* [4u] secondary address space, little endian */
#define	ASI_PRIMARY_NOFAULT_LITTLE	0x8a	/* [4u] primary address space, no fault, little endian */
#define	ASI_SECONDARY_NOFAULT_LITTLE	0x8b	/* [4u] secondary address space, no fault, little endian */

#define	ASI_PST8_PRIMARY		0xc0	/* [VIS] Eight 8-bit partial store, primary */
#define	ASI_PST8_SECONDARY		0xc1	/* [VIS] Eight 8-bit partial store, secondary */
#define	ASI_PST16_PRIMARY		0xc2	/* [VIS] Four 16-bit partial store, primary */
#define	ASI_PST16_SECONDARY		0xc3	/* [VIS] Four 16-bit partial store, secondary */
#define	ASI_PST32_PRIMARY		0xc4	/* [VIS] Two 32-bit partial store, primary */
#define	ASI_PST32_SECONDARY		0xc5	/* [VIS] Two 32-bit partial store, secondary */

#define	ASI_PST8_PRIMARY_LITTLE		0xc8	/* [VIS] Eight 8-bit partial store, primary, little endian */
#define	ASI_PST8_SECONDARY_LITTLE	0xc9	/* [VIS] Eight 8-bit partial store, secondary, little endian */
#define	ASI_PST16_PRIMARY_LITTLE	0xca	/* [VIS] Four 16-bit partial store, primary, little endian */
#define	ASI_PST16_SECONDARY_LITTLE	0xcb	/* [VIS] Four 16-bit partial store, secondary, little endian */
#define	ASI_PST32_PRIMARY_LITTLE	0xcc	/* [VIS] Two 32-bit partial store, primary, little endian */
#define	ASI_PST32_SECONDARY_LITTLE	0xcd	/* [VIS] Two 32-bit partial store, secondary, little endian */

#define	ASI_FL8_PRIMARY			0xd0	/* [VIS] One 8-bit load/store floating, primary */
#define	ASI_FL8_SECONDARY		0xd1	/* [VIS] One 8-bit load/store floating, secondary */
#define	ASI_FL16_PRIMARY		0xd2	/* [VIS] One 16-bit load/store floating, primary */
#define	ASI_FL16_SECONDARY		0xd3	/* [VIS] One 16-bit load/store floating, secondary */

#define	ASI_FL8_PRIMARY_LITTLE		0xd8	/* [VIS] One 8-bit load/store floating, primary, little endian */
#define	ASI_FL8_SECONDARY_LITTLE	0xd9	/* [VIS] One 8-bit load/store floating, secondary, little endian */
#define	ASI_FL16_PRIMARY_LITTLE		0xda	/* [VIS] One 16-bit load/store floating, primary, little endian */
#define	ASI_FL16_SECONDARY_LITTLE	0xdb	/* [VIS] One 16-bit load/store floating, secondary, little endian */

#define	ASI_BLOCK_COMMIT_PRIMARY	0xe0	/* [4u] block store with commit, primary */
#define	ASI_BLOCK_COMMIT_SECONDARY	0xe1	/* [4u] block store with commit, secondary */
#define	ASI_BLOCK_PRIMARY		0xf0	/* [4u] block load/store, primary */
#define	ASI_BLOCK_SECONDARY		0xf1	/* [4u] block load/store, secondary */
#define	ASI_BLOCK_PRIMARY_LITTLE	0xf8	/* [4u] block load/store, primary, little endian */
#define	ASI_BLOCK_SECONDARY_LITTLE	0xf9	/* [4u] block load/store, secondary, little endian */


/*
 * These are the shorter names used by Solaris
 */

#define	ASI_N		ASI_NUCLEUS
#define	ASI_NL		ASI_NUCLEUS_LITTLE
#define	ASI_AIUP	ASI_AS_IF_USER_PRIMARY
#define	ASI_AIUS	ASI_AS_IF_USER_SECONDARY
#define	ASI_AIUPL	ASI_AS_IF_USER_PRIMARY_LITTLE
#define	ASI_AIUSL	ASI_AS_IF_USER_SECONDARY_LITTLE
#define	ASI_P		ASI_PRIMARY
#define	ASI_S		ASI_SECONDARY
#define	ASI_PNF		ASI_PRIMARY_NOFAULT
#define	ASI_SNF		ASI_SECONDARY_NOFAULT
#define	ASI_PL		ASI_PRIMARY_LITTLE
#define	ASI_SL		ASI_SECONDARY_LITTLE
#define	ASI_PNFL	ASI_PRIMARY_NOFAULT_LITTLE
#define	ASI_SNFL	ASI_SECONDARY_NOFAULT_LITTLE
#define	ASI_FL8_P	ASI_FL8_PRIMARY
#define	ASI_FL8_S	ASI_FL8_SECONDARY
#define	ASI_FL16_P	ASI_FL16_PRIMARY
#define	ASI_FL16_S	ASI_FL16_SECONDARY
#define	ASI_FL8_PL	ASI_FL8_PRIMARY_LITTLE
#define	ASI_FL8_SL	ASI_FL8_SECONDARY_LITTLE
#define	ASI_FL16_PL	ASI_FL16_PRIMARY_LITTLE
#define	ASI_FL16_SL	ASI_FL16_SECONDARY_LITTLE
#define	ASI_BLK_AIUP	ASI_BLOCK_AS_IF_USER_PRIMARY
#define	ASI_BLK_AIUPL	ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE
#define	ASI_BLK_AIUS	ASI_BLOCK_AS_IF_USER_SECONDARY
#define	ASI_BLK_AIUSL	ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE
#define	ASI_BLK_COMMIT_P		ASI_BLOCK_COMMIT_PRIMARY
#define	ASI_BLK_COMMIT_PRIMARY		ASI_BLOCK_COMMIT_PRIMARY
#define	ASI_BLK_COMMIT_S		ASI_BLOCK_COMMIT_SECONDARY
#define	ASI_BLK_COMMIT_SECONDARY	ASI_BLOCK_COMMIT_SECONDARY
#define	ASI_BLK_P			ASI_BLOCK_PRIMARY
#define	ASI_BLK_PL			ASI_BLOCK_PRIMARY_LITTLE
#define	ASI_BLK_S			ASI_BLOCK_SECONDARY
#define	ASI_BLK_SL			ASI_BLOCK_SECONDARY_LITTLE

/* Alternative spellings */
#define	ASI_PRIMARY_NO_FAULT		ASI_PRIMARY_NOFAULT
#define	ASI_PRIMARY_NO_FAULT_LITTLE	ASI_PRIMARY_NOFAULT_LITTLE
#define	ASI_SECONDARY_NO_FAULT		ASI_SECONDARY_NOFAULT
#define	ASI_SECONDARY_NO_FAULT_LITTLE	ASI_SECONDARY_NOFAULT_LITTLE

#define	PHYS_ASI(x)	(((x) | 0x09) == 0x1d)
#define	LITTLE_ASI(x)	((x) & ASI_LITTLE)
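
/*
 * Note: PHYS_ASI() is true exactly for the four MMU-bypass ASIs
 * (0x14, 0x15, 0x1c and 0x1d), since ORing 0x09 into any of them yields
 * 0x1d, e.g. 0x14 | 0x09 == 0x1d, while e.g. 0x80 | 0x09 == 0x89.
 * LITTLE_ASI() simply tests the ASI_LITTLE bit (0x08).
 */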

/*
 * The following are 4u control registers
 */

/* Get the CPU's UPAID */
#define	UPA_CR_MID_SHIFT	(17)
#define	UPA_CR_MID_SIZE		(5)
#define	UPA_CR_MID_MASK \
	(((1 << UPA_CR_MID_SIZE) - 1) << UPA_CR_MID_SHIFT)

#define	UPA_CR_MID(x)	(((x)>>UPA_CR_MID_SHIFT)&((1 << UPA_CR_MID_SIZE) - 1))

#ifdef _LOCORE

#define	UPA_GET_MID(r1) \
	ldxa	[%g0] ASI_MID_REG, r1 ; \
	srlx	r1, UPA_CR_MID_SHIFT, r1 ; \
	and	r1, (1 << UPA_CR_MID_SIZE) - 1, r1

#else
#define	CPU_UPAID	UPA_CR_MID(ldxa(0, ASI_MID_REG))
#endif

/*
 * [4u] MMU and Cache Control Register (MCCR)
 * use ASI = 0x45
 */
#define	ASI_MCCR	ASI_LSU_CONTROL_REGISTER
#define	MCCR		0x00

/* MCCR Bits and their meanings */
#define	MCCR_DMMU_EN	0x08
#define	MCCR_IMMU_EN	0x04
#define	MCCR_DCACHE_EN	0x02
#define	MCCR_ICACHE_EN	0x01
#define	MCCR_RAW_EN	0x400000000000
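
/*
 * Illustrative sketch: the MCCR sits at offset MCCR (0) of ASI_MCCR, so
 * with the ldxa()/stxa() accessors defined at the end of this file it can
 * be probed roughly like this:
 *
 *	if (ldxa(MCCR, ASI_MCCR) & MCCR_DCACHE_EN)
 *		... D-cache is enabled ...
 */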

/*
 * MMU control registers
 */

/* Choose an MMU */
#define	ASI_DMMU		0x58
#define	ASI_IMMU		0x50

/* Other assorted MMU ASIs */
#define	ASI_IMMU_8KPTR		0x51
#define	ASI_IMMU_64KPTR		0x52
#define	ASI_IMMU_DATA_IN	0x54
#define	ASI_IMMU_TLB_DATA	0x55
#define	ASI_IMMU_TLB_TAG	0x56
#define	ASI_DMMU_8KPTR		0x59
#define	ASI_DMMU_64KPTR		0x5a
#define	ASI_DMMU_DATA_IN	0x5c
#define	ASI_DMMU_TLB_DATA	0x5d
#define	ASI_DMMU_TLB_TAG	0x5e

/*
 * The following are the control registers.
 * They work on both MMUs unless noted.
 * III = Cheetah (UltraSPARC III) only
 *
 * Register contents are defined later on individual registers.
 */
#define	TSB_TAG_TARGET		0x0
#define	TLB_DATA_IN		0x0
#define	CTX_PRIMARY		0x08	/* primary context -- DMMU only */
#define	CTX_SECONDARY		0x10	/* secondary context -- DMMU only */
#define	SFSR			0x18
#define	SFAR			0x20	/* fault address -- DMMU only */
#define	TSB			0x28
#define	TLB_TAG_ACCESS		0x30
#define	VIRTUAL_WATCHPOINT	0x38
#define	PHYSICAL_WATCHPOINT	0x40
#define	TSB_PEXT		0x48	/* III primary ext */
#define	TSB_SEXT		0x50	/* III 2ndary ext -- DMMU only */
#define	TSB_NEXT		0x58	/* III nucleus ext */

/* Tag Target bits */
#define	TAG_TARGET_VA_MASK	0x03ffffffffffffffffLL
#define	TAG_TARGET_VA(x)	(((x)<<22)&TAG_TARGET_VA_MASK)
#define	TAG_TARGET_CONTEXT(x)	((x)>>48)
#define	TAG_TARGET(c,v)		((((uint64_t)c)<<48)|(((uint64_t)v)&TAG_TARGET_VA_MASK))

/* SFSR bits for both D_SFSR and I_SFSR */
#define	SFSR_ASI(x)		((x)>>16)
#define	SFSR_FT_VA_OOR_2	0x02000	/* IMMU: jumpl or return to unsupported VA */
#define	SFSR_FT_VA_OOR_1	0x01000	/* fault at unsupported VA */
#define	SFSR_FT_NFO		0x00800	/* DMMU: Access to page marked NFO */
#define	SFSR_ILL_ASI		0x00400	/* DMMU: Illegal (unsupported) ASI */
#define	SFSR_FT_IO_ATOMIC	0x00200	/* DMMU: Atomic access to noncacheable page */
#define	SFSR_FT_ILL_NF		0x00100	/* DMMU: NF load or flush to page marked E (has side effects) */
#define	SFSR_FT_PRIV		0x00080	/* Privilege violation */
#define	SFSR_FT_E		0x00040	/* DMMU: value of E bit associated with the faulting address */
#define	SFSR_CTXT(x)		(((x)>>4)&0x3)
#define	SFSR_CTXT_IS_PRIM(x)	(SFSR_CTXT(x)==0x00)
#define	SFSR_CTXT_IS_SECOND(x)	(SFSR_CTXT(x)==0x01)
#define	SFSR_CTXT_IS_NUCLEUS(x)	(SFSR_CTXT(x)==0x02)
#define	SFSR_PRIV		0x00008	/* value of PSTATE.PRIV for faulting access */
#define	SFSR_W			0x00004	/* DMMU: attempted write */
#define	SFSR_OW			0x00002	/* Overwrite; prev fault was still valid */
#define	SFSR_FV			0x00001	/* Fault is valid */
#define	SFSR_FT	(SFSR_FT_VA_OOR_2|SFSR_FT_VA_OOR_1|SFSR_FT_NFO| \
		SFSR_ILL_ASI|SFSR_FT_IO_ATOMIC|SFSR_FT_ILL_NF|SFSR_FT_PRIV)

#define	SFSR_BITS "\177\20" \
	"f\20\30ASI\0" "b\16VAT\0" "b\15VAD\0" "b\14NFO\0" "b\13ASI\0" "b\12A\0" \
	"b\11NF\0" "b\10PRIV\0" "b\7E\0" "b\6NUCLEUS\0" "b\5SECONDCTX\0" "b\4PRIV\0" \
	"b\3W\0" "b\2OW\0" "b\1FV\0"
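
/*
 * SFSR_BITS is a snprintb(9)-style format string; a fault status value can
 * be decoded for a diagnostic message roughly like this (buffer name and
 * size are illustrative only):
 *
 *	char buf[128];
 *	snprintb(buf, sizeof(buf), SFSR_BITS, sfsr);
 *	printf("SFSR=%s\n", buf);
 */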

/* ASFR bits */
#define	ASFR_ME		0x100000000LL
#define	ASFR_PRIV	0x080000000LL
#define	ASFR_ISAP	0x040000000LL
#define	ASFR_ETP	0x020000000LL
#define	ASFR_IVUE	0x010000000LL
#define	ASFR_TO		0x008000000LL
#define	ASFR_BERR	0x004000000LL
#define	ASFR_LDP	0x002000000LL
#define	ASFR_CP		0x001000000LL
#define	ASFR_WP		0x000800000LL
#define	ASFR_EDP	0x000400000LL
#define	ASFR_UE		0x000200000LL
#define	ASFR_CE		0x000100000LL
#define	ASFR_ETS	0x0000f0000LL
#define	ASFT_P_SYND	0x00000ffffLL

#define	AFSR_BITS "\177\20" \
	"b\40ME\0" "b\37PRIV\0" "b\36ISAP\0" "b\35ETP\0" \
	"b\34IVUE\0" "b\33TO\0" "b\32BERR\0" "b\31LDP\0" \
	"b\30CP\0" "b\27WP\0" "b\26EDP\0" "b\25UE\0" \
	"b\24CE\0" "f\20\4ETS\0" "f\0\20P_SYND\0"

/*
 * Here are the Spitfire TSB control register bits.
 *
 * Each TSB entry is 16 bytes wide.  The TSB must be size aligned.
 */
#define	TSB_SIZE_512	0x0	/* 8kB, etc. */
#define	TSB_SIZE_1K	0x01
#define	TSB_SIZE_2K	0x02
#define	TSB_SIZE_4K	0x03
#define	TSB_SIZE_8K	0x04
#define	TSB_SIZE_16K	0x05
#define	TSB_SIZE_32K	0x06
#define	TSB_SIZE_64K	0x07
#define	TSB_SPLIT	0x1000
#define	TSB_BASE	0xffffffffffffe000
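
/*
 * Worked example: the size field selects 512 << n entries and each entry
 * is 16 bytes, so TSB_SIZE_512 describes a 512-entry/8kB TSB, TSB_SIZE_1K
 * a 1024-entry/16kB TSB, and so on up to TSB_SIZE_64K (65536 entries, 1MB).
 */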

/* TLB Tag Access bits */
#define	TLB_TAG_ACCESS_VA	0xffffffffffffe000
#define	TLB_TAG_ACCESS_CTX	0x0000000000001fff

/*
 * TLB demap registers.  TTEs are defined in v9pte.h
 *
 * Use the address space to select between IMMU and DMMU.
 * The virtual address used for the store supplies both the page to demap
 * and, in the low-order bits defined below, the demap type and which
 * context (primary, secondary or nucleus) it applies to; the store data
 * itself is ignored.  The DEMAP_CTX_<> operations ignore the page address
 * and demap an entire context.
 *
 */
#define	ASI_IMMU_DEMAP		0x57	/* [4u] IMMU TLB demap */
#define	ASI_DMMU_DEMAP		0x5f	/* [4u] DMMU TLB demap */

#define	DEMAP_PAGE_NUCLEUS	((0x02)<<4)	/* Demap page from kernel AS */
#define	DEMAP_PAGE_PRIMARY	((0x00)<<4)	/* Demap a page from primary CTXT */
#define	DEMAP_PAGE_SECONDARY	((0x01)<<4)	/* Demap page from secondary CTXT (DMMU only) */
#define	DEMAP_CTX_NUCLEUS	((0x06)<<4)	/* Demap all of kernel CTXT */
#define	DEMAP_CTX_PRIMARY	((0x04)<<4)	/* Demap all of primary CTXT */
#define	DEMAP_CTX_SECONDARY	((0x05)<<4)	/* Demap all of secondary CTXT */
#define	DEMAP_ALL		((0x08)<<4)	/* Demap all non-locked TLB entries [USIII] */
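
/*
 * Illustrative sketch, using the stxa() and membar_sync() helpers defined
 * below (the page mask assumes 8kB pages); demapping one page from the
 * D-MMU primary context looks roughly like:
 *
 *	stxa((va & ~0x1fffUL) | DEMAP_PAGE_PRIMARY, ASI_DMMU_DEMAP, 0);
 *	membar_sync();
 */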

/*
 * These define the sizes of the TLB in various CPUs.
 * They're mostly not necessary except for diagnostic code.
 */
#define	TLB_SIZE_SPITFIRE		64
#define	TLB_SIZE_CHEETAH_I16		16
#define	TLB_SIZE_CHEETAH_I128		128
#define	TLB_SIZE_CHEETAH_D16		16
#define	TLB_SIZE_CHEETAH_D512_0		512
#define	TLB_SIZE_CHEETAH_D512_1		512
#define	TLB_CHEETAH_I16			(0 << 16)
#define	TLB_CHEETAH_I128		(2 << 16)
#define	TLB_CHEETAH_D16			(0 << 16)
#define	TLB_CHEETAH_D512_0		(2 << 16)
#define	TLB_CHEETAH_D512_1		(3 << 16)

/*
 * Interrupt registers.  This really gets hairy.
 */

/* IRSR -- Interrupt Receive Status Register */
#define	ASI_IRSR	0x49
#define	IRSR		0x00
#define	IRSR_BUSY	0x020
#define	IRSR_MID(x)	((x)&0x1f)

/* IRDR -- Interrupt Receive Data Registers */
#define	ASI_IRDR	0x7f
#define	IRDR_0H		0x40
#define	IRDR_0L		0x48	/* unimplemented */
#define	IRDR_1H		0x50
#define	IRDR_1L		0x58	/* unimplemented */
#define	IRDR_2H		0x60
#define	IRDR_2L		0x68	/* unimplemented */
#define	IRDR_3H		0x70	/* unimplemented */
#define	IRDR_3L		0x78	/* unimplemented */

/* SOFTINT ASRs */
#define	SET_SOFTINT	%asr20	/* Sets these bits */
#define	CLEAR_SOFTINT	%asr21	/* Clears these bits */
#define	SOFTINT		%asr22	/* Reads the register */
#define	TICK_CMPR	%asr23

#define	TICK_INT	0x01	/* level-14 clock tick */
#define	SOFTINT1	(0x1<<1)
#define	SOFTINT2	(0x1<<2)
#define	SOFTINT3	(0x1<<3)
#define	SOFTINT4	(0x1<<4)
#define	SOFTINT5	(0x1<<5)
#define	SOFTINT6	(0x1<<6)
#define	SOFTINT7	(0x1<<7)
#define	SOFTINT8	(0x1<<8)
#define	SOFTINT9	(0x1<<9)
#define	SOFTINT10	(0x1<<10)
#define	SOFTINT11	(0x1<<11)
#define	SOFTINT12	(0x1<<12)
#define	SOFTINT13	(0x1<<13)
#define	SOFTINT14	(0x1<<14)
#define	SOFTINT15	(0x1<<15)

/* Interrupt Dispatch -- usually reserved for cross-calls */
#define	ASR_IDSR	0x48	/* Interrupt dispatch status reg */
#define	IDSR		0x00
#define	IDSR_NACK	0x02
#define	IDSR_BUSY	0x01

#define	ASI_INTERRUPT_DISPATCH		0x77	/* [4u] spitfire interrupt dispatch regs */

/* Interrupt delivery initiation */
#define	IDCR(x)		((((uint64_t)(x)) << 14) | 0x70)

#define	IDDR_0H		0x40	/* Store data to send in these regs */
#define	IDDR_0L		0x48	/* unimplemented */
#define	IDDR_1H		0x50
#define	IDDR_1L		0x58	/* unimplemented */
#define	IDDR_2H		0x60
#define	IDDR_2L		0x68	/* unimplemented */
#define	IDDR_3H		0x70	/* unimplemented */
#define	IDDR_3L		0x78	/* unimplemented */
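
/*
 * Illustrative sketch of an interrupt dispatch (cross-call) using the
 * stxa()/ldxa()/membar_sync() helpers defined below; "func", "arg1",
 * "arg2" and "target_upaid" are example names only:
 *
 *	stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, func);
 *	stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, arg1);
 *	stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, arg2);
 *	stxa(IDCR(target_upaid), ASI_INTERRUPT_DISPATCH, 0);
 *	membar_sync();
 *	while (ldxa(IDSR, ASR_IDSR) & IDSR_BUSY)
 *		continue;
 *	if (ldxa(IDSR, ASR_IDSR) & IDSR_NACK)
 *		... the target did not accept; retry ...
 */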

/*
 * Error registers
 */

/* Since we won't try to fix async errs, we don't care about the bits in the regs */
#define	ASI_AFAR	0x4d	/* Asynchronous fault address register */
#define	AFAR		0x00
#define	ASI_AFSR	0x4c	/* Asynchronous fault status register */
#define	AFSR		0x00

#define	ASI_P_EER	0x4b	/* Error enable register */
#define	P_EER		0x00
#define	P_EER_ISAPEN	0x04	/* Enable fatal on ISAP */
#define	P_EER_NCEEN	0x02	/* Enable trap on uncorrectable errs */
#define	P_EER_CEEN	0x01	/* Enable trap on correctable errs */

#define	ASI_DATAPATH_READ	0x7f	/* Read the regs */
#define	ASI_DATAPATH_WRITE	0x77	/* Write to the regs */
#define	P_DPER_0	0x00	/* Datapath err reg 0 */
#define	P_DPER_1	0x18	/* Datapath err reg 1 */
#define	P_DCR_0		0x20	/* Datapath control reg 0 */
#define	P_DCR_1		0x38	/* Datapath control reg 1 */


/* From sparc64/asm.h which I think I'll deprecate since it makes bus.h a pain. */

#ifndef _LOCORE
/*
 * GCC __asm constructs for doing assembly stuff.
 */

/*
 * ``Routines'' to load and store from/to alternate address space.
 * The location can be a variable, the asi value (address space identifier)
 * must be a constant.
 *
 * N.B.: You can put as many special functions here as you like, since
 * they cost no kernel space or time if they are not used.
 *
 * These were static inline functions, but gcc screws up the constraints
 * on the address space identifiers (the "n"umeric value part) because
 * it inlines too late, so we have to use the funny valued-macro syntax.
 */

/*
 * Apparently the definition of bypass ASIs is that they all use the
 * D$ so we need to flush the D$ to make sure we don't get data pollution.
 */

#ifdef __arch64__

/* 64-bit kernel, non-constant */
#define SPARC64_LD_NONCONST(ld)	\
	__asm volatile(							\
		"wr %2,%%g0,%%asi;	"				\
		#ld " [%1]%%asi,%0	"				\
		: "=r" (_v)						\
		: "r" ((__uintptr_t)(loc)), "r" (asi))

#if defined(__GNUC__) && defined(__OPTIMIZE__)
#define SPARC64_LD_DEF(ld, type, vtype)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	vtype _v;							\
	if (__builtin_constant_p(asi))					\
		__asm volatile(						\
			#ld " [%1]%2,%0	"				\
			: "=r" (_v)					\
			: "r" ((__uintptr_t)(loc)), "n" (asi));		\
	else								\
		SPARC64_LD_NONCONST(ld);				\
	return _v;							\
}
#else
#define SPARC64_LD_DEF(ld, type, vtype)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	vtype _v;							\
	SPARC64_LD_NONCONST(ld);					\
	return _v;							\
}
#endif
#define SPARC64_LD_DEF64(ld, type)	SPARC64_LD_DEF(ld, type, uint64_t)

#else	/* __arch64__ */

/* 32-bit kernel, MMU bypass, non-constant */
#define SPARC64_LD_PHYS_NONCONST(ld)	\
	__asm volatile(							\
		"clruw %2;		"				\
		"rdpr %%pstate,%1;	"				\
		"sllx %3,32,%0;		"				\
		"wrpr %1,8,%%pstate;	"				\
		"or %0,%2,%0;		"				\
		"wr %4,%%g0,%%asi;	"				\
		#ld " [%0]%%asi,%0;	"				\
		"wrpr %1,0,%%pstate	"				\
		: "=&r" (_v), "=&r" (_pstate)				\
		: "r" ((uint32_t)(loc)), "r" (_hi), "r" (asi))
/* 32-bit kernel, non-constant */
#define SPARC64_LD_NONCONST(ld)	\
	__asm volatile(							\
		"wr %2,%%g0,%%asi;	"				\
		#ld " [%1]%%asi,%0	"				\
		: "=&r" (_v)						\
		: "r" ((uint32_t)(loc)), "r" (asi))
/* 32-bit kernel, MMU bypass, non-constant, 64-bit value */
#define SPARC64_LD_PHYS_NONCONST64(ld)	\
	__asm volatile(							\
		"clruw %2;		"				\
		"rdpr %%pstate,%1;	"				\
		"sllx %3,32,%0;		"				\
		"wrpr %1,8,%%pstate;	"				\
		"or %0,%2,%0;		"				\
		"wr %4,%%g0,%%asi;	"				\
		#ld " [%0]%%asi,%0;	"				\
		"wrpr %1,0,%%pstate;	"				\
		"srlx %0,32,%1;		"				\
		"srl %0,0,%0		"				\
		: "=&r" (_vlo), "=&r" (_vhi)				\
		: "r" ((uint32_t)(loc)), "r" (_hi), "r" (asi))
/* 32-bit kernel, non-constant, 64-bit value */
#define SPARC64_LD_NONCONST64(ld)	\
	__asm volatile(							\
		"wr %3,%%g0,%%asi;	"				\
		#ld " [%2]%%asi,%0;	"				\
		"srlx %0,32,%1;		"				\
		"srl %0,0,%0		"				\
		: "=&r" (_vlo), "=&r" (_vhi)				\
		: "r" ((uint32_t)(loc)), "r" (asi))

#if defined(__GNUC__) && defined(__OPTIMIZE__)
#define SPARC64_LD_DEF(ld, type, vtype)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	vtype _v;							\
	uint32_t _hi, _pstate;						\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				"clruw %2;		"		\
				"rdpr %%pstate,%1;	"		\
				"sllx %3,32,%0;		"		\
				"wrpr %1,8,%%pstate;	"		\
				"or %0,%2,%0;		"		\
				#ld " [%0]%4,%0;	"		\
				"wrpr %1,0,%%pstate;	"		\
				: "=&r" (_v), "=&r" (_pstate)		\
				: "r" ((uint32_t)(loc)), "r" (_hi),	\
				  "n" (asi));				\
		else							\
			SPARC64_LD_PHYS_NONCONST(ld);			\
	} else {							\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				#ld " [%1]%2,%0	"			\
				: "=&r" (_v)				\
				: "r" ((uint32_t)(loc)), "n" (asi));	\
		else							\
			SPARC64_LD_NONCONST(ld);			\
	}								\
	return _v;							\
}
#define SPARC64_LD_DEF64(ld, type)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	uint32_t _vlo, _vhi, _hi;					\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				"clruw %2;		"		\
				"rdpr %%pstate,%1;	"		\
				"sllx %3,32,%0;		"		\
				"wrpr %1,8,%%pstate;	"		\
				"or %0,%2,%0;		"		\
				#ld " [%0]%4,%0;	"		\
				"wrpr %1,0,%%pstate;	"		\
				"srlx %0,32,%1;		"		\
				"srl %0,0,%0		"		\
				: "=&r" (_vlo), "=&r" (_vhi)		\
				: "r" ((uint32_t)(loc)), "r" (_hi),	\
				  "n" (asi));				\
		else							\
			SPARC64_LD_PHYS_NONCONST64(ld);			\
	} else {							\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				#ld " [%2]%3,%0;	"		\
				"srlx %0,32,%1;		"		\
				"srl %0,0,%0		"		\
				: "=&r" (_vlo), "=&r" (_vhi)		\
				: "r" ((uint32_t)(loc)), "n" (asi));	\
		else							\
			SPARC64_LD_NONCONST64(ld);			\
	}								\
	return ((uint64_t)_vhi << 32) | _vlo;				\
}
#else
#define SPARC64_LD_DEF(ld, type, vtype)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	vtype _v;							\
	uint32_t _hi, _pstate;						\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		SPARC64_LD_PHYS_NONCONST(ld);				\
	} else								\
		SPARC64_LD_NONCONST(ld);				\
	return _v;							\
}
#define SPARC64_LD_DEF64(ld, type)	\
static __inline type ld(paddr_t loc, int asi)				\
{									\
	uint32_t _vlo, _vhi, _hi;					\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		SPARC64_LD_PHYS_NONCONST64(ld);				\
	} else								\
		SPARC64_LD_NONCONST64(ld);				\
	return ((uint64_t)_vhi << 32) | _vlo;				\
}
#endif

#endif	/* __arch64__ */

/* load byte from alternate address space */
SPARC64_LD_DEF(lduba, uint8_t, uint32_t)
/* load half-word from alternate address space */
SPARC64_LD_DEF(lduha, uint16_t, uint32_t)
/* load unsigned int from alternate address space */
SPARC64_LD_DEF(lda, uint32_t, uint32_t)
/* load signed int from alternate address space */
SPARC64_LD_DEF(ldswa, int, int)
/* load 64-bit unsigned int from alternate address space */
SPARC64_LD_DEF64(ldxa, uint64_t)
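
/*
 * Illustrative usage of the load accessors generated above ("pa" and "va"
 * are example variables):
 *
 *	uint8_t  b = lduba(pa, ASI_PHYS_CACHED);	MMU-bypass byte load
 *	uint64_t x = ldxa(va, ASI_NUCLEUS);		64-bit kernel-AS load
 *
 * Pass a compile-time constant asi where possible so the single-instruction
 * immediate-ASI form can be generated.
 */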

#ifdef __arch64__

/* 64-bit kernel, non-constant */
#define SPARC64_ST_NONCONST(st)	\
	__asm volatile(							\
		"wr %2,%%g0,%%asi;	"				\
		#st " %0,[%1]%%asi	"				\
		: : "r" (value), "r" ((__uintptr_t)(loc)),		\
		    "r" (asi))

#if defined(__GNUC__) && defined(__OPTIMIZE__)
#define SPARC64_ST_DEF(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	if (__builtin_constant_p(asi))					\
		__asm volatile(						\
			#st " %0,[%1]%2	"				\
			: : "r" (value), "r" ((__uintptr_t)(loc)),	\
			    "n" (asi));					\
	else								\
		SPARC64_ST_NONCONST(st);				\
}
#else
#define SPARC64_ST_DEF(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	SPARC64_ST_NONCONST(st);					\
}
#endif
#define SPARC64_ST_DEF64(st, type)	SPARC64_ST_DEF(st, type)

#else	/* __arch64__ */

/* 32-bit kernel, MMU bypass, non-constant */
#define SPARC64_ST_PHYS_NONCONST(st)	\
	__asm volatile(							\
		"clruw %3;		"				\
		"rdpr %%pstate,%1;	"				\
		"sllx %4,32,%0;		"				\
		"wrpr %1,8,%%pstate;	"				\
		"or %0,%3,%0;		"				\
		"wr %5,%%g0,%%asi;	"				\
		#st " %2,[%0]%%asi;	"				\
		"wrpr %1,0,%%pstate	"				\
		: "=&r" (_hi), "=&r" (_pstate)				\
		: "r" (value), "r" ((uint32_t)(loc)),			\
		  "r" (_hi), "r" (asi))
/* 32-bit kernel, non-constant */
#define SPARC64_ST_NONCONST(st)	\
	__asm volatile(							\
		"wr %2,%%g0,%%asi;	"				\
		#st " %0,[%1]%%asi	"				\
		: : "r" (value), "r" ((uint32_t)(loc)), "r" (asi))
/* 32-bit kernel, MMU bypass, non-constant, 64-bit value */
#define SPARC64_ST_PHYS_NONCONST64(st)	\
	__asm volatile(							\
		"clruw %3;		"				\
		"clruw %5;		"				\
		"sllx %4,32,%1;		"				\
		"sllx %6,32,%0;		"				\
		"rdpr %%pstate,%2;	"				\
		"or %1,%3,%1;		"				\
		"wrpr %2,8,%%pstate;	"				\
		"or %0,%5,%0;		"				\
		"wr %7,%%g0,%%asi;	"				\
		#st " %1,[%0]%%asi;	"				\
		"wrpr %2,0,%%pstate	"				\
		: "=&r" (_hi), "=&r" (_vhi), "=&r" (_vlo)		\
		: "r" (_vlo), "r" (_vhi),				\
		  "r" ((uint32_t)(loc)), "r" (_hi), "r" (asi))
/* 32-bit kernel, non-constant, 64-bit value */
#define SPARC64_ST_NONCONST64(st)	\
	__asm volatile(							\
		"clruw %1;		"				\
		"sllx %2,32,%0;		"				\
		"or %0,%1,%0;		"				\
		"wr %4,%%g0,%%asi;	"				\
		#st " %0,[%3]%%asi	"				\
		: "=&r" (_vhi)						\
		: "r" (_vlo), "r" (_vhi),				\
		  "r" ((uint32_t)(loc)), "r" (asi))

#if defined(__GNUC__) && defined(__OPTIMIZE__)
#define SPARC64_ST_DEF(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	uint32_t _hi, _pstate;						\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				"clruw %3;		"		\
				"sllx %4,32,%0;		"		\
				"rdpr %%pstate,%1;	"		\
				"or %0,%3,%0;		"		\
				"wrpr %1,8,%%pstate;	"		\
				#st " %2,[%0]%5;	"		\
				"wrpr %1,0,%%pstate	"		\
				: "=&r" (_hi), "=&r" (_pstate)		\
				: "r" (value), "r" ((uint32_t)(loc)),	\
				  "r" (_hi), "n" (asi));		\
		else							\
			SPARC64_ST_PHYS_NONCONST(st);			\
	} else {							\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				#st " %0,[%1]%2	"			\
				: : "r" (value), "r" ((uint32_t)(loc)),	\
				    "n" (asi));				\
		else							\
			SPARC64_ST_NONCONST(st);			\
	}								\
}
#define SPARC64_ST_DEF64(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	uint32_t _vlo, _vhi, _hi;					\
	_vlo = value;							\
	_vhi = (uint64_t)(value) >> 32;					\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				"clruw %3;		"		\
				"clruw %5;		"		\
				"sllx %4,32,%1;		"		\
				"sllx %6,32,%0;		"		\
				"rdpr %%pstate,%2;	"		\
				"or %1,%3,%1;		"		\
				"or %0,%5,%0;		"		\
				"wrpr %2,8,%%pstate;	"		\
				#st " %1,[%0]%7;	"		\
				"wrpr %2,0,%%pstate	"		\
				: "=&r" (_hi), "=&r" (_vhi), "=&r" (_vlo) \
				: "r" (_vlo), "r" (_vhi),		\
				  "r" ((uint32_t)(loc)), "r" (_hi),	\
				  "n" (asi));				\
		else							\
			SPARC64_ST_PHYS_NONCONST64(st);			\
	} else {							\
		if (__builtin_constant_p(asi))				\
			__asm volatile(					\
				"clruw %1;		"		\
				"sllx %2,32,%0;		"		\
				"or %0,%1,%0;		"		\
				#st " %0,[%3]%4	"			\
				: "=&r" (_vhi)				\
				: "r" (_vlo), "r" (_vhi),		\
				  "r" ((uint32_t)(loc)), "n" (asi));	\
		else							\
			SPARC64_ST_NONCONST64(st);			\
	}								\
}
#else
#define SPARC64_ST_DEF(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	uint32_t _hi, _pstate;						\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		SPARC64_ST_PHYS_NONCONST(st);				\
	} else								\
		SPARC64_ST_NONCONST(st);				\
}
#define SPARC64_ST_DEF64(st, type)	\
static __inline void st(paddr_t loc, int asi, type value)		\
{									\
	uint32_t _vlo, _vhi, _hi;					\
	_vlo = value;							\
	_vhi = (uint64_t)(value) >> 32;					\
	if (PHYS_ASI(asi)) {						\
		_hi = (uint64_t)(loc) >> 32;				\
		SPARC64_ST_PHYS_NONCONST64(st);				\
	} else								\
		SPARC64_ST_NONCONST64(st);				\
}
#endif

#endif	/* __arch64__ */

/* store byte to alternate address space */
SPARC64_ST_DEF(stba, uint8_t)
/* store half-word to alternate address space */
SPARC64_ST_DEF(stha, uint16_t)
/* store unsigned int to alternate address space */
SPARC64_ST_DEF(sta, uint32_t)
/* store 64-bit unsigned int to alternate address space */
SPARC64_ST_DEF64(stxa, uint64_t)
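
/*
 * Illustrative usage of the store accessors generated above ("pa" and
 * "value" are example variables); stores through the bypass ASIs are
 * usually followed by a barrier such as membar_sync() (defined below):
 *
 *	stxa(pa, ASI_PHYS_NON_CACHED, value);
 *	membar_sync();
 */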

/* set dmmu secondary context */
static __inline void
dmmu_set_secondary_context(uint ctx)
{
	__asm volatile(
		"stxa %0,[%1]%2;	"
		"membar #Sync		"
		: : "r" (ctx), "r" (CTX_SECONDARY), "n" (ASI_DMMU)
		: "memory");
}

/* flush address from data cache */
#define	flush(loc) __asm volatile("flush %0" : : "r" ((__uintptr_t)(loc)))

/*
 * SPARC V9 memory barrier instructions.
 */
/* Make all stores complete before next store */
#define	membar_storestore() __asm volatile("membar #StoreStore" : :)
/* Make all loads complete before next store */
#define	membar_loadstore() __asm volatile("membar #LoadStore" : :)
/* Make all stores complete before next load */
#define	membar_storeload() __asm volatile("membar #StoreLoad" : :)
/* Make all loads complete before next load */
#define	membar_loadload() __asm volatile("membar #LoadLoad" : :)
/* Complete all outstanding memory operations and exceptions */
#define	membar_sync() __asm volatile("membar #Sync" : :)
/* Complete all outstanding memory operations */
#define	membar_memissue() __asm volatile("membar #MemIssue" : :)
/* Complete all outstanding stores before any new loads */
#define	membar_lookaside() __asm volatile("membar #Lookaside" : :)

#define	membar_load() __asm volatile("membar #LoadLoad | #LoadStore" : :)
#define	membar_store() __asm volatile("membar #LoadStore | #StoreStore" : :)

#endif

#endif /* _SPARC_CTLREG_H_ */