/*	$NetBSD: mips_fixup.c,v 1.16 2016/07/11 19:17:55 skrll Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mips_fixup.c,v 1.16 2016/07/11 19:17:55 skrll Exp $");

#include "opt_mips3_wired.h"
#include "opt_multiprocessor.h"
#include <sys/param.h>

#include <uvm/uvm_extern.h>

#include <mips/locore.h>
#include <mips/cache.h>
#include <mips/mips3_pte.h>
#include <mips/regnum.h>
#include <mips/mips_opcode.h>

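/*
 * Boot-time code patching for MIPS: mips_fixup_exceptions() rewrites
 * references to cpu_info_store in the copied exception vectors, and
 * mips_fixup_stubs() turns jumps to the out-of-line .stub wrappers
 * defined at the bottom of this file into direct jumps to the
 * run-time-selected implementations.
 */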
bool
mips_fixup_exceptions(mips_fixup_callback_t callback, void *arg)
{
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	int32_t ebase = mipsNN_cp0_ebase_read();
	uint32_t *start;
	if (ebase == mips_options.mips_cpu_id
	    || (ebase & __BITS(31,30)) != __BIT(31)) {
		start = (uint32_t *)MIPS_KSEG0_START;
	} else {
		start = (uint32_t *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM);
	}
#else
	uint32_t * const start = (uint32_t *)MIPS_KSEG0_START;
#endif
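	/*
	 * Scan the five 128-byte exception vectors (TLB refill, XTLB
	 * refill, cache error, general exception, and interrupt) that
	 * start at this base, hence 5 * 128 bytes.
	 */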
	uint32_t * const end = start + (5 * 128) / sizeof(uint32_t);
	const int32_t addr = (intptr_t)&cpu_info_store;
	const size_t size = sizeof(cpu_info_store);
	uint32_t new_insns[2];
	uint32_t *lui_insnp = NULL;
	int32_t lui_offset = 0;
	bool fixed = false;
	size_t lui_reg = 0;
#ifdef DEBUG_VERBOSE
	printf("%s: fixing %p..%p\n", __func__, start, end);
#endif
	/*
	 * If cpu_info_store was allocated so that bit 15 of its address
	 * is 1, %hi adds 1 to the upper immediate (0x10000 to the value
	 * loaded) to compensate for the sign-extended (negative) %lo
	 * offset used for the lower half of the address.
	 */
	const int32_t upper_start = (addr + 32768) & ~0xffff;
	const int32_t upper_end = (addr + size - 1 + 32768) & ~0xffff;
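	/*
	 * Worked example (hypothetical address): for addr 0x80408abc,
	 * bit 15 is set, so %hi rounds up: (0x80408abc + 0x8000) &
	 * ~0xffff = 0x80410000, while %lo sign-extends: 0x8abc -> -0x7544.
	 * 0x80410000 - 0x7544 = 0x80408abc, the original address.
	 */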

#ifndef MIPS64_OCTEON
	KASSERT((addr & ~0xfff) == ((addr + size - 1) & ~0xfff));
#endif
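	/*
	 * A sketch of the pattern this loop rewrites (registers per the
	 * KASSERTs below, addresses hypothetical): the vectors reference
	 * cpu_info_store with a lui/load pair such as
	 *	lui	k0, %hi(cpu_info_store)
	 *	lw	k0, %lo(cpu_info_store)(k0)
	 * and the callback may replace the pair via new_insns[], e.g.
	 * with a single $0-relative load plus a NOP.
	 */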

	uint32_t lui_insn = 0;
	for (uint32_t *insnp = start; insnp < end; insnp++) {
		const uint32_t insn = *insnp;
		if (INSN_LUI_P(insn)) {
			const int32_t offset = insn << 16;
			lui_reg = (insn >> 16) & 31;
#ifdef DEBUG_VERBOSE
			printf("%s: %#x: insn %08x: lui r%zu, %%hi(%#x)",
			    __func__, (int32_t)(intptr_t)insnp,
			    insn, lui_reg, offset);
#endif
			KASSERT(lui_reg == _R_K0 || lui_reg == _R_K1);
			if (upper_start == offset || upper_end == offset) {
				lui_insnp = insnp;
				lui_insn = insn;
				lui_offset = offset;
#ifdef DEBUG_VERBOSE
				printf(" (maybe)");
#endif
			} else {
				lui_insnp = NULL;
				lui_insn = 0;
				lui_offset = 0;
			}
#ifdef DEBUG_VERBOSE
			printf("\n");
#endif
		} else if (lui_insn != 0
		    && (INSN_LOAD_P(insn) || INSN_STORE_P(insn))) {
			size_t base = (insn >> 21) & 31;
#if defined(DIAGNOSTIC) || defined(DEBUG_VERBOSE)
			size_t rt = (insn >> 16) & 31;
#endif
			int32_t load_addr = lui_offset + (int16_t)insn;
			if (addr <= load_addr
			    && load_addr < addr + size
			    && base == lui_reg) {
				KASSERT(rt == _R_K0 || rt == _R_K1);
#ifdef DEBUG_VERBOSE
				printf("%s: %#x: insn %08x: %s r%zu, %%lo(%08x)(r%zu)\n",
				    __func__, (int32_t)(intptr_t)insnp,
				    insn,
				    INSN_LOAD_P(insn)
					? INSN_LW_P(insn) ? "lw" : "ld"
					: INSN_SW_P(insn) ? "sw" : "sd",
				    rt, load_addr, base);
#endif
				new_insns[0] = lui_insn;
				new_insns[1] = *insnp;
				if ((callback)(load_addr, new_insns, arg)) {
					if (lui_insnp) {
						*lui_insnp = new_insns[0];
						*insnp = new_insns[1];
					} else if (new_insns[1] == 0) {
						*insnp = new_insns[0];
					} else {
						*insnp = new_insns[1];
					}
					fixed = true;
				}
				lui_insnp = NULL;
			}
		}
	}

	if (fixed)
		mips_icache_sync_range((vaddr_t)start,
		    sizeof(start[0]) * (end - start));

	return fixed;
}

#ifdef MIPS3_PLUS
bool
mips_fixup_zero_relative(int32_t load_addr, uint32_t new_insns[2], void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	KASSERT(MIPS_KSEG0_P(load_addr));
	KASSERT(!MIPS_CACHE_VIRTUAL_ALIAS);
#ifdef MULTIPROCESSOR
	KASSERT(CPU_IS_PRIMARY(ci));
#endif
	KASSERT((intptr_t)ci <= load_addr);
	KASSERT(load_addr < (intptr_t)(ci + 1));
	KASSERT(MIPS_HAS_R4K_MMU);

	/*
	 * Use the load instruction as a prototype: make it use $0 as
	 * the base register with the new negative offset.  The second
	 * instruction becomes a NOP.
	 */
	new_insns[0] =
	    (new_insns[1] & (0xfc1f0000|PAGE_MASK)) | (0xffff & ~PAGE_MASK);
	new_insns[1] = 0;
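	/*
	 * The mask keeps the opcode and rt fields (0xfc1f0000) plus the
	 * within-page offset bits (PAGE_MASK), clears the base register
	 * field to $0, and forces the remaining immediate bits to 1 so
	 * the 16-bit offset sign-extends to -PAGE_SIZE plus the page
	 * offset, i.e. into the wired mapping installed below.  For
	 * example (hypothetical symbol), "lw k0, %lo(x)(k0)" becomes
	 * "lw k0, <page offset of x> - PAGE_SIZE($0)".
	 */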
#ifdef DEBUG_VERBOSE
	printf("%s: %08x: insn#1 %08x: %s r%u, %d(r%u)\n",
	    __func__, (int32_t)load_addr, new_insns[0],
	    INSN_LOAD_P(new_insns[0])
		? INSN_LW_P(new_insns[0]) ? "lw" : "ld"
		: INSN_SW_P(new_insns[0]) ? "sw" : "sd",
	    (new_insns[0] >> 16) & 31,
	    (int16_t)new_insns[0],
	    (new_insns[0] >> 21) & 31);
#endif
	/*
	 * Construct the TLB_LO entry needed to map cpu_info_store.
	 */

	/*
	 * Now allocate a TLB entry in the primary TLB for the mapping and
	 * enter the mapping into the TLB.
	 */
	TLBINFO_LOCK(ti);
	if (ci->ci_tlb_slot < 0) {
		uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V|MIPS3_PG_D
		    | mips3_paddr_to_tlbpfn(MIPS_KSEG0_TO_PHYS(trunc_page(load_addr)));
		struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if PGSHIFT & 1
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};
		ci->ci_tlb_slot = ti->ti_wired++;
		mips3_cp0_wired_write(ti->ti_wired);
		tlb_invalidate_addr(-PAGE_SIZE, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}
	TLBINFO_UNLOCK(ti);

	return true;
}
#endif /* MIPS3_PLUS */

#define OPCODE_J	002
#define OPCODE_JAL	003

static inline void
fixup_mips_jump(uint32_t *insnp, const struct mips_jump_fixup_info *jfi)
{
	uint32_t insn = *insnp;

	KASSERT((insn >> (26+1)) == (OPCODE_J >> 1));
	KASSERT((insn << 6) == (jfi->jfi_stub << 6));

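	/*
	 * jfi_stub and jfi_real hold 26-bit jump offsets and the low 26
	 * bits of insn currently equal jfi_stub (per the KASSERT above),
	 * so XORing with their difference rewrites only the target field
	 * and leaves the J/JAL opcode bits untouched.
	 */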
	insn ^= (jfi->jfi_stub ^ jfi->jfi_real);

	KASSERT((insn << 6) == (jfi->jfi_real << 6));

#ifdef DEBUG
#if 0
	const uint32_t opcode = insn >> 26;
	int32_t va = ((intptr_t)insnp >> 28) << 28;
	printf("%s: %08x: [%08x] %s %08x -> [%08x] %s %08x\n",
	    __func__, (int32_t)(intptr_t)insnp,
	    insn, opcode == OPCODE_J ? "j" : "jal",
	    va | (jfi->jfi_stub << 2),
	    *insnp, opcode == OPCODE_J ? "j" : "jal",
	    va | (jfi->jfi_real << 2));
#endif
#endif
	*insnp = insn;
}

intptr_t
mips_fixup_addr(const uint32_t *stubp)
{
	/*
	 * Stubs typically look like:
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	[nop]
	 *	jr	t9
	 *	nop
	 *
	 * Or, for loongson2:
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	lui	at, 0xcfff
	 *	ori	at, at, 0xffff
	 *	and	t9, t9, at
	 *	jr	t9
	 *	move	at, at
	 * or:
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	li	at, 0x3
	 *	dmtc0	at, $22
	 *	jr	t9
	 *	nop
	 */
	mips_reg_t regs[32];
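	/*
	 * Bit i of "used" marks register i as holding a known value:
	 * $zero and the argument registers are assumed live on entry.
	 */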
	uint32_t used = 1 | __BIT(_R_A0)|__BIT(_R_A1)|__BIT(_R_A2)|__BIT(_R_A3);
	size_t n;
	const char *errstr = "mips";
	/*
	 * This is basically a small MIPS emulator for those instructions
	 * that might be in a stub routine.
	 */
	for (n = 0; n < 16; n++) {
		const InstFmt insn = { .word = stubp[n] };
		switch (insn.IType.op) {
		case OP_LUI:
			regs[insn.IType.rt] = (int16_t)insn.IType.imm << 16;
			used |= (1 << insn.IType.rt);
			break;
#ifdef _LP64
		case OP_LD:
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "LD";
				goto out;
			}
			regs[insn.IType.rt] = *(const int64_t *)
			    (regs[insn.IType.rs] + (int16_t)insn.IType.imm);
			used |= (1 << insn.IType.rt);
			break;
		case OP_SD:
			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
				errstr = "SD";
				goto out;
			}
			break;
#else
		case OP_LW:
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "LW";
				goto out;
			}
			regs[insn.IType.rt] = *(const int32_t *)
			    ((intptr_t)regs[insn.IType.rs]
			    + (int16_t)insn.IType.imm);
			used |= (1 << insn.IType.rt);
			break;
		case OP_SW:
			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
				errstr = "SW";
				goto out;
			}
			break;
#endif
		case OP_ORI:
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "ORI";
				goto out;
			}
			regs[insn.IType.rt] |= insn.IType.imm;
			used |= (1 << insn.IType.rt);
			break;
		case OP_COP0:
			switch (insn.RType.rs) {
			case OP_DMT:
				if (insn.RType.rd != 22) {
					errstr = "dmtc0 dst";
					goto out;
				}
				if ((used & (1 << insn.RType.rt)) == 0) {
					errstr = "dmtc0 src";
					goto out;
				}
				break;
			default:
				errstr = "COP0";
				goto out;
			}
			break;
		case OP_SPECIAL:
			switch (insn.RType.func) {
			case OP_JALR:
			case OP_JR:
				if ((used & (1 << insn.RType.rs)) == 0) {
					errstr = "JR";
					goto out;
				}
				if (stubp[n+1] != 0
				    && (stubp[n+1] & 0xfff0003c) != 0x0000003c
				    && stubp[n+1] != 0x00200825) {
					n++;
					errstr = "delay slot";
					goto out;
				}
				return regs[insn.RType.rs];
			case OP_AND:
				if ((used & (1 << insn.RType.rs)) == 0
				    || (used & (1 << insn.RType.rt)) == 0) {
					errstr = "AND";
					goto out;
				}
				regs[insn.RType.rd] =
				    regs[insn.RType.rs] & regs[insn.RType.rt];
				used |= (1 << insn.RType.rd);
				break;
#if !defined(__mips_o32)
			case OP_DSLL32:	/* force to 32-bits */
			case OP_DSRA32:	/* force to 32-bits */
				if (insn.RType.rd != insn.RType.rt
				    || (used & (1 << insn.RType.rt)) == 0
				    || insn.RType.shamt != 0) {
					errstr = "DSLL32/DSRA32";
					goto out;
				}
				break;
#endif
			case OP_SLL:	/* nop */
				if (insn.RType.rd != _R_ZERO) {
					errstr = "NOP";
					goto out;
				}
				break;
			case OP_DSLL:
			default:
				errstr = "SPECIAL";
				goto out;
			}
			break;
		default:
			errstr = "mips";
			goto out;
		}
	}

 out:
	printf("%s: unexpected %s insn %#x at %p\n",
	    __func__, errstr,
	    stubp[n], &stubp[n]);
	return 0;
}

void
mips_fixup_stubs(uint32_t *start, uint32_t *end)
{
#ifdef DEBUG
	size_t fixups_done = 0;
	uint32_t cycles =
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	    (CPUISMIPS3 ? mips3_cp0_count_read() : 0);
#else
	    0;
#endif
#endif
	extern uint32_t __stub_start[], __stub_end[];

	KASSERT(MIPS_KSEG0_P(start));
	KASSERT(MIPS_KSEG0_P(end));
	KASSERT(MIPS_KSEG0_START == (((intptr_t)start >> 28) << 28));

	if (end > __stub_start)
		end = __stub_start;

	for (uint32_t *insnp = start; insnp < end; insnp++) {
		uint32_t insn = *insnp;
		uint32_t offset = insn & 0x03ffffff;
		uint32_t opcode = insn >> 26;
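		/*
		 * A J/JAL target replaces the low 28 bits of the PC:
		 * target = (pc & ~0x0fffffff) | (offset << 2).  For a
		 * hypothetical jump at 0x80234567 with offset 0x123,
		 * the target resolves to 0x8000048c.
		 */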
		const uint32_t * const stubp =
		    &((uint32_t *)(((intptr_t)insnp >> 28) << 28))[offset];

		/*
		 * First we check to see if this is a jump and whether it is
		 * within the range we are interested in.
		 */
		if ((opcode != OPCODE_J && opcode != OPCODE_JAL)
		    || stubp < __stub_start || __stub_end <= stubp)
			continue;

		const intptr_t real_addr = mips_fixup_addr(stubp);

		/*
		 * If the real address could not be determined,
		 * leave this jump alone.
		 */
		if (real_addr == 0) {
			continue;
		}
		/*
		 * Verify the real destination is in the same 256MB
		 * as the location of the jump instruction.
		 */
		KASSERT((real_addr >> 28) == ((intptr_t)insnp >> 28));

		/*
		 * Now fix it up.  Replace the old displacement to the stub
		 * with the real displacement.
		 */
		struct mips_jump_fixup_info fixup = {
			.jfi_stub = fixup_addr2offset(stubp),
			.jfi_real = fixup_addr2offset(real_addr),
		};

		fixup_mips_jump(insnp, &fixup);
#ifdef DEBUG
		fixups_done++;
#endif
	}

	if (sizeof(uint32_t [end - start]) > mips_cache_info.mci_picache_size)
		mips_icache_sync_all();
	else
		mips_icache_sync_range((vaddr_t)start,
		    sizeof(uint32_t [end - start]));

#ifdef DEBUG
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	if (CPUISMIPS3)
		cycles = mips3_cp0_count_read() - cycles;
#endif
	printf("%s: %zu fixup%s done in %u cycles\n", __func__,
	    fixups_done, fixups_done == 1 ? "" : "s",
	    cycles);
#endif
}

#define __stub	__section(".stub")

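/*
 * The functions below are placeholder bodies kept in the .stub section;
 * each simply indirects through a run-time-selected jump vector.  At boot,
 * mips_fixup_stubs() rewrites J/JAL instructions targeting these stubs so
 * that callers jump directly to the selected implementation, removing the
 * indirection from every subsequent call.
 */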
void mips_cpu_switch_resume(struct lwp *) __stub;
tlb_asid_t tlb_get_asid(void) __stub;
void tlb_set_asid(uint32_t) __stub;
void tlb_invalidate_all(void) __stub;
void tlb_invalidate_globals(void) __stub;
void tlb_invalidate_asids(uint32_t, uint32_t) __stub;
void tlb_invalidate_addr(vaddr_t, tlb_asid_t) __stub;
u_int tlb_record_asids(u_long *, tlb_asid_t) __stub;
bool tlb_update_addr(vaddr_t, tlb_asid_t, pt_entry_t, bool)
    __stub;
void tlb_read_entry(size_t, struct tlbmask *) __stub;
void tlb_write_entry(size_t, const struct tlbmask *) __stub;

/*
 * wbflush isn't a stub since it gets overridden quite late
 * (after mips_vector_init returns).
 */
void wbflush(void) /*__stub*/;

void
mips_cpu_switch_resume(struct lwp *l)
{
	(*mips_locore_jumpvec.ljv_cpu_switch_resume)(l);
}

tlb_asid_t
tlb_get_asid(void)
{
	return (*mips_locore_jumpvec.ljv_tlb_get_asid)();
}

void
tlb_set_asid(uint32_t asid)
{
	(*mips_locore_jumpvec.ljv_tlb_set_asid)(asid);
}

void
tlb_invalidate_all(void)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_all)();
}

void
tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_addr)(va, asid);
}

void
tlb_invalidate_globals(void)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_globals)();
}

void
tlb_invalidate_asids(uint32_t asid_lo, uint32_t asid_hi)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_asids)(asid_lo, asid_hi);
}

u_int
tlb_record_asids(u_long *bitmap, tlb_asid_t asid_max)
{
	return (*mips_locore_jumpvec.ljv_tlb_record_asids)(bitmap, asid_max);
}

#if 0
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert)
{
	return (*mips_locore_jumpvec.ljv_tlb_update_addr)(va, asid, pte, insert);
}
#endif

void
tlb_read_entry(size_t tlbno, struct tlbmask *tlb)
{
	(*mips_locore_jumpvec.ljv_tlb_read_entry)(tlbno, tlb);
}

void
tlb_write_entry(size_t tlbno, const struct tlbmask *tlb)
{
	(*mips_locore_jumpvec.ljv_tlb_write_entry)(tlbno, tlb);
}

void
wbflush(void)
{
	(*mips_locoresw.lsw_wbflush)();
}

#ifndef LOCKDEBUG
void mutex_enter(kmutex_t *mtx) __stub;
void mutex_exit(kmutex_t *mtx) __stub;
void mutex_spin_enter(kmutex_t *mtx) __stub;
void mutex_spin_exit(kmutex_t *mtx) __stub;

void
mutex_enter(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_enter)(mtx);
}

void
mutex_exit(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_exit)(mtx);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_spin_enter)(mtx);
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_spin_exit)(mtx);
}
#endif /* !LOCKDEBUG */

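/*
 * The CAS entry points are likewise vectored: mips_locore_atomicvec is
 * filled in at boot with the implementation appropriate for the CPU
 * (e.g. LL/SC-based where available), and jumps to these stubs are then
 * patched to go there directly.
 */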
u_int _atomic_cas_uint(volatile u_int *, u_int, u_int) __stub;
u_long _atomic_cas_ulong(volatile u_long *, u_long, u_long) __stub;

u_int
_atomic_cas_uint(volatile u_int *ptr, u_int old, u_int new)
{

	return (*mips_locore_atomicvec.lav_atomic_cas_uint)(ptr, old, new);
}

u_long
_atomic_cas_ulong(volatile u_long *ptr, u_long old, u_long new)
{

	return (*mips_locore_atomicvec.lav_atomic_cas_ulong)(ptr, old, new);
}

__strong_alias(atomic_cas_uint, _atomic_cas_uint)
__strong_alias(atomic_cas_uint_ni, _atomic_cas_uint)
__strong_alias(_atomic_cas_32, _atomic_cas_uint)
__strong_alias(_atomic_cas_32_ni, _atomic_cas_uint)
__strong_alias(atomic_cas_32, _atomic_cas_uint)
__strong_alias(atomic_cas_32_ni, _atomic_cas_uint)
__strong_alias(atomic_cas_ptr, _atomic_cas_ulong)
__strong_alias(atomic_cas_ptr_ni, _atomic_cas_ulong)
__strong_alias(atomic_cas_ulong, _atomic_cas_ulong)
__strong_alias(atomic_cas_ulong_ni, _atomic_cas_ulong)
#ifdef _LP64
__strong_alias(atomic_cas_64, _atomic_cas_ulong)
__strong_alias(atomic_cas_64_ni, _atomic_cas_ulong)
__strong_alias(_atomic_cas_64, _atomic_cas_ulong)
__strong_alias(_atomic_cas_64_ni, _atomic_cas_ulong)
#endif

int ucas_uint(volatile u_int *, u_int, u_int, u_int *) __stub;
int ucas_ulong(volatile u_long *, u_long, u_long, u_long *) __stub;

int
ucas_uint(volatile u_int *ptr, u_int old, u_int new, u_int *retp)
{

	return (*mips_locore_atomicvec.lav_ucas_uint)(ptr, old, new, retp);
}
__strong_alias(ucas_32, ucas_uint);
__strong_alias(ucas_int, ucas_uint);

int
ucas_ulong(volatile u_long *ptr, u_long old, u_long new, u_long *retp)
{

	return (*mips_locore_atomicvec.lav_ucas_ulong)(ptr, old, new, retp);
}
__strong_alias(ucas_ptr, ucas_ulong);
__strong_alias(ucas_long, ucas_ulong);
#ifdef _LP64
__strong_alias(ucas_64, ucas_ulong);
#endif