#!/bin/sh

#
# Copyright (c) 2015 EMC Corp.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

# Bug 198163 - Kernel panic in vm_reserv_populate()
# Test scenario by: ikosarev@accesssoftek.com
# http://people.freebsd.org/~pho/stress/log/kostik771.txt
# Fixed by r280238
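#
# The test is self-contained: the C++ program following the "EOF" marker
# below is extracted with sed, compiled and run while the swap test from
# ../testcases/swap keeps memory under pressure in the background.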

. ../default.cfg

uname -a | egrep -q "i386|amd64" || exit 0
odir=`pwd`
cd /tmp
sed '1,/^EOF/d' < $odir/$0 > vm_reserv_populate.cc
rm -f /tmp/vm_reserv_populate
mycc -o vm_reserv_populate -Wall -Wextra -g -O2 vm_reserv_populate.cc ||
    exit 1
rm -f vm_reserv_populate.cc

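# Run the fork load with the swap stress going in parallel, then clean up.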
(cd $odir/../testcases/swap; ./swap -t 5m -i 20 -h -l 100 > /dev/null) &
./vm_reserv_populate
while pgrep -q swap; do
	pkill -9 swap
done
rm vm_reserv_populate
exit 0
EOF
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <signal.h>
#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <assert.h>

#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define INLINE inline
#define NOINLINE __attribute__((noinline))
#define SYSCALL(name) SYS_ ## name
#define internal_syscall __syscall

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef unsigned long uptr;

struct atomic_uint32_t {
  typedef u32 Type;
  volatile Type val_dont_use;
};

struct atomic_uintptr_t {
  typedef uptr Type;
  volatile Type val_dont_use;
};

uptr internal_sched_yield() {
  return internal_syscall(SYSCALL(sched_yield));
}

enum memory_order {
  memory_order_relaxed = 1 << 0,
  memory_order_consume = 1 << 1,
  memory_order_acquire = 1 << 2,
  memory_order_release = 1 << 3,
  memory_order_acq_rel = 1 << 4,
  memory_order_seq_cst = 1 << 5
};
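
// The orders are encoded as individual bits rather than consecutive values,
// presumably so that the asserts in atomic_load()/atomic_store() below can
// check "one of these orders" with a single bitmask test.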

// Spin-wait hint: execute cnt "pause" instructions between compiler barriers.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
NOINLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV is enough for a seq_cst load.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}

template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo __unused) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

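// How the primitives above are used here (see x_fork() below): bit 0 of each
// table entry serves as a tiny spinlock.  The acquire side is roughly
//
//   uptr cmp = atomic_load(p, memory_order_relaxed);
//   if ((cmp & 1) == 0 &&
//       atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
//     /* entry is now locked */;
//
// and the release side clears the bit again with
// atomic_store(p, s & ~1UL, memory_order_release).
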
const u32 kTabSizeLog = 20;
const int kTabSize = 1 << kTabSizeLog;

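// One word per entry: 1 << 20 entries is about 8 MB on 64-bit and 4 MB on
// 32-bit systems.  This is the memory that x_fork() walks and dirties around
// every fork().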
static atomic_uintptr_t tab[kTabSize];

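// Lock/fork/unlock, in the style of a runtime's fork() wrapper: set the low
// bit of every table entry with a CAS, fork, then clear the bits again in
// both parent and child.  The net effect, and presumably the point of the
// test, is that the whole table is written immediately before and after each
// fork(), generating a burst of copy-on-write faults while the swap load
// keeps memory tight.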
int x_fork(void) {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    for (int j = 0;; j++) {
      uptr cmp = atomic_load(p, memory_order_relaxed);
      if ((cmp & 1) == 0 &&
          atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
        break;
      if (j < 10)
        proc_yield(10);
      else
        internal_sched_yield();
    }
  }

  int pid = fork();

  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    uptr s = atomic_load(p, memory_order_relaxed);
    atomic_store(p, (s & ~1UL), memory_order_release);
  }

  return pid;
}

void test() {
  pid_t pid = x_fork();
  if (pid) {
    pid_t p;
    while ((p = wait(NULL)) == -1) { }
  }
}

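// Spawn kChildren processes, each of which forks once more via test(), then
// reap everything after a grace period.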
int main() {
  const int kChildren = 1000;
  for (int i = 0; i < kChildren; ++i) {
    pid_t pid = x_fork();
    if (!pid) {
      test();
      return 0;
    }
  }

  sleep(5);

  for (int i = 0; i < kChildren; ++i) {
    pid_t p;
    while ((p = wait(NULL)) == -1) {  }
  }

  return 0;
}