/*	$NetBSD: memset2.c,v 1.11 2022/01/15 10:38:56 andvar Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

324542d04bSapb #include <sys/cdefs.h>
334542d04bSapb #if defined(LIBC_SCCS) && !defined(lint)
34*9bf968a3Sandvar __RCSID("$NetBSD: memset2.c,v 1.11 2022/01/15 10:38:56 andvar Exp $");
354542d04bSapb #endif /* LIBC_SCCS and not lint */
364542d04bSapb
378daf714eSmatt #include <sys/types.h>
388daf714eSmatt
398daf714eSmatt #if !defined(_KERNEL) && !defined(_STANDALONE)
408daf714eSmatt #include <assert.h>
418daf714eSmatt #include <limits.h>
428daf714eSmatt #include <string.h>
438daf714eSmatt #include <inttypes.h>
448daf714eSmatt #else
458daf714eSmatt #include <lib/libkern/libkern.h>
468daf714eSmatt #include <machine/limits.h>
478daf714eSmatt #endif
488daf714eSmatt
498daf714eSmatt #include <sys/endian.h>
508daf714eSmatt #include <machine/types.h>
518daf714eSmatt
529c3e547aSmrg #undef __OPTIMIZE_SIZE__
539c3e547aSmrg #define __OPTIMIZE_SIZE__ 1 /* other code path is very broken */
5470abe4f6Ssimonb
558daf714eSmatt #ifdef TEST
568daf714eSmatt #include <assert.h>
578daf714eSmatt #define _DIAGASSERT(a) assert(a)
588daf714eSmatt #endif
598daf714eSmatt
608daf714eSmatt #ifdef _FORTIFY_SOURCE
618daf714eSmatt #undef bzero
628daf714eSmatt #endif
63ae62ad9eSjoerg #undef memset
648daf714eSmatt
658daf714eSmatt /*
662ad19f5cSsimonb * Assume __register_t is the widest non-synthetic unsigned type.
678daf714eSmatt */
682ad19f5cSsimonb typedef __register_t memword_t;
694542d04bSapb
708daf714eSmatt #ifdef BZERO
718daf714eSmatt static inline
728daf714eSmatt #define memset memset0
738daf714eSmatt #endif
748daf714eSmatt
758daf714eSmatt #ifdef TEST
768daf714eSmatt static
778daf714eSmatt #define memset test_memset
788daf714eSmatt #endif
798daf714eSmatt
808daf714eSmatt void *
memset(void * addr,int c,size_t len)818daf714eSmatt memset(void *addr, int c, size_t len)
828daf714eSmatt {
838daf714eSmatt memword_t *dstp = addr;
848daf714eSmatt memword_t *edstp;
858daf714eSmatt memword_t fill;
868daf714eSmatt #ifndef __OPTIMIZE_SIZE__
878daf714eSmatt memword_t keep_mask = 0;
888daf714eSmatt #endif
898daf714eSmatt size_t fill_count;
908daf714eSmatt
918daf714eSmatt _DIAGASSERT(addr != 0);
928daf714eSmatt
938daf714eSmatt if (__predict_false(len == 0))
948daf714eSmatt return addr;
958daf714eSmatt
968daf714eSmatt /*
978daf714eSmatt * Pad out the fill byte (v) across a memword_t.
98*9bf968a3Sandvar * The conditional at the end prevents GCC from complaining about
998daf714eSmatt * shift count >= width of type
1008daf714eSmatt */
10148e172dbSsimonb fill = (unsigned char)c;
1028daf714eSmatt fill |= fill << 8;
1038daf714eSmatt fill |= fill << 16;
1048daf714eSmatt fill |= fill << (sizeof(c) < sizeof(fill) ? 32 : 0);
1058daf714eSmatt
1068daf714eSmatt /*
1078daf714eSmatt * Get the number of unaligned bytes to fill in the first word.
1088daf714eSmatt */
1098daf714eSmatt fill_count = -(uintptr_t)addr & (sizeof(memword_t) - 1);
1108daf714eSmatt
1118daf714eSmatt if (__predict_false(fill_count != 0)) {
1128daf714eSmatt #ifndef __OPTIMIZE_SIZE__
1138daf714eSmatt /*
1148daf714eSmatt * We want to clear <fill_count> trailing bytes in the word.
1158daf714eSmatt * On big/little endian, these are the least/most significant,
1168daf714eSmatt * bits respectively. So as we shift, the keep_mask will only
1178daf714eSmatt * have bits set for the bytes we won't be filling.
1188daf714eSmatt */
1198daf714eSmatt #if BYTE_ORDER == BIG_ENDIAN
1208daf714eSmatt keep_mask = ~(memword_t)0U << (fill_count * 8);
1218daf714eSmatt #endif
1228daf714eSmatt #if BYTE_ORDER == LITTLE_ENDIAN
1238daf714eSmatt keep_mask = ~(memword_t)0U >> (fill_count * 8);
1248daf714eSmatt #endif
1258daf714eSmatt /*
1268daf714eSmatt * Make sure dstp is aligned to a memword_t boundary.
1278daf714eSmatt */
1288daf714eSmatt dstp = (memword_t *)((uintptr_t)addr & -sizeof(memword_t));
1298daf714eSmatt if (len >= fill_count) {
1308daf714eSmatt /*
1318daf714eSmatt * If we can fill the rest of this word, then we mask
1328daf714eSmatt * off the bytes we are filling and then fill in those
1338daf714eSmatt * bytes with the new fill value.
1348daf714eSmatt */
1358daf714eSmatt *dstp = (*dstp & keep_mask) | (fill & ~keep_mask);
1368daf714eSmatt len -= fill_count;
1378daf714eSmatt if (__predict_false(len == 0))
1388daf714eSmatt return addr;
1398daf714eSmatt /*
1408daf714eSmatt * Since we were able to fill the rest of this word,
1418daf714eSmatt * we will advance to the next word and thus have no
1428daf714eSmatt * bytes to preserve.
1438daf714eSmatt *
1448daf714eSmatt * If we don't have enough to fill the rest of this
1458daf714eSmatt * word, we will fall through the following loop
1468daf714eSmatt * (since there are no full words to fill). Then we
1478daf714eSmatt * use the keep_mask above to preserve the leading
1488daf714eSmatt * bytes of word.
1498daf714eSmatt */
1508daf714eSmatt dstp++;
1518daf714eSmatt keep_mask = 0;
1528daf714eSmatt } else {
1538daf714eSmatt len += (uintptr_t)addr & (sizeof(memword_t) - 1);
1548daf714eSmatt }
1558daf714eSmatt #else /* __OPTIMIZE_SIZE__ */
1568daf714eSmatt uint8_t *dp, *ep;
1578daf714eSmatt if (len < fill_count)
1588daf714eSmatt fill_count = len;
1598daf714eSmatt for (dp = (uint8_t *)dstp, ep = dp + fill_count;
1608daf714eSmatt dp != ep; dp++)
1618daf714eSmatt *dp = fill;
1628daf714eSmatt if ((len -= fill_count) == 0)
1638daf714eSmatt return addr;
1648daf714eSmatt dstp = (memword_t *)ep;
1658daf714eSmatt #endif /* __OPTIMIZE_SIZE__ */
1668daf714eSmatt }
1678daf714eSmatt
1688daf714eSmatt /*
1698daf714eSmatt * Simply fill memory one word at time (for as many full words we have
1708daf714eSmatt * to write).
1718daf714eSmatt */
1728daf714eSmatt for (edstp = dstp + len / sizeof(memword_t); dstp != edstp; dstp++)
1738daf714eSmatt *dstp = fill;
1748daf714eSmatt
1758daf714eSmatt /*
1768daf714eSmatt * We didn't subtract out the full words we just filled since we know
1778daf714eSmatt * by the time we get here we will have less than a words worth to
1788daf714eSmatt * write. So we can concern ourselves with only the subword len bits.
1798daf714eSmatt */
1808daf714eSmatt len &= sizeof(memword_t)-1;
1818daf714eSmatt if (len > 0) {
1828daf714eSmatt #ifndef __OPTIMIZE_SIZE__
1838daf714eSmatt /*
1848daf714eSmatt * We want to clear <len> leading bytes in the word.
1858daf714eSmatt * On big/little endian, these are the most/least significant
186*9bf968a3Sandvar * bits, respectively. But as we want the mask of the bytes to
1878daf714eSmatt * keep, we have to complement the mask. So after we shift,
1888daf714eSmatt * the keep_mask will only have bits set for the bytes we won't
1898daf714eSmatt * be filling.
1908daf714eSmatt *
1918daf714eSmatt * But the keep_mask could already have bytes to preserve
192*9bf968a3Sandvar * if the amount to fill was less than the amount of trailing
1938daf714eSmatt * space in the first word.
1948daf714eSmatt */
1958daf714eSmatt #if BYTE_ORDER == BIG_ENDIAN
1968daf714eSmatt keep_mask |= ~(memword_t)0U >> (len * 8);
1978daf714eSmatt #endif
1988daf714eSmatt #if BYTE_ORDER == LITTLE_ENDIAN
1998daf714eSmatt keep_mask |= ~(memword_t)0U << (len * 8);
2008daf714eSmatt #endif
2018daf714eSmatt /*
2028daf714eSmatt * Now we mask off the bytes we are filling and then fill in
2038daf714eSmatt * those bytes with the new fill value.
2048daf714eSmatt */
2058daf714eSmatt *dstp = (*dstp & keep_mask) | (fill & ~keep_mask);
2068daf714eSmatt #else /* __OPTIMIZE_SIZE__ */
2078daf714eSmatt uint8_t *dp, *ep;
2088daf714eSmatt for (dp = (uint8_t *)dstp, ep = dp + len;
2098daf714eSmatt dp != ep; dp++)
2108daf714eSmatt *dp = fill;
2118daf714eSmatt #endif /* __OPTIMIZE_SIZE__ */
2128daf714eSmatt }
2138daf714eSmatt
2148daf714eSmatt /*
2158daf714eSmatt * Return the initial addr
2168daf714eSmatt */
2178daf714eSmatt return addr;
2188daf714eSmatt }
2198daf714eSmatt
#ifdef BZERO
/*
 * For bzero, simply inline memset (renamed memset0 above) and let the
 * compiler optimize the constant zero fill away.
 */
void
bzero(void *addr, size_t len)
{
	memset(addr, 0, len);
}
#endif

#ifdef TEST
#include <stdbool.h>
#include <stdio.h>

#undef memset

/* Buffer of four words so every alignment and sub-word length is exercised. */
static union {
	uint8_t bytes[sizeof(memword_t) * 4];
	memword_t words[4];
} testmem;

/*
 * Exhaustively verify test_memset: for every (start, len) inside the
 * buffer, clear that range of a 0xff-filled buffer and check that exactly
 * the requested bytes were cleared and all others were preserved.
 * Exits 1 if any pass failed, 0 otherwise.
 */
int
main(int argc, char **argv)
{
	size_t start;
	size_t len;
	bool failed = false;

	for (start = 1; start < sizeof(testmem) - 1; start++) {
		for (len = 1; start + len < sizeof(testmem) - 1; len++) {
			bool ok = true;
			size_t i;
			uint8_t check_value;
			memset(testmem.bytes, 0xff, sizeof(testmem));
			test_memset(testmem.bytes + start, 0x00, len);
			for (i = 0; i < sizeof(testmem); i++) {
				/*
				 * check_value tracks the expected byte:
				 * 0xff outside [start, start+len),
				 * 0x00 inside it.  i == 0 initializes it.
				 */
				if (i == 0 || i == start + len)
					check_value = 0xff;
				else if (i == start)
					check_value = 0x00;
				if (testmem.bytes[i] != check_value) {
					if (ok)
						printf("pass @ %zu .. %zu failed",
						    start, start + len - 1);
					ok = false;
					printf(" [%zu]=0x%02x(!0x%02x)",
					    i, testmem.bytes[i], check_value);
				}
			}
			if (!ok) {
				printf("\n");
				failed = true;	/* was 1; keep bool idiom */
			}
		}
	}

	return failed ? 1 : 0;
}
#endif /* TEST */