xref: /minix/usr.bin/make/unit-tests/Makefile (revision 0a6a1f1d)
1# $NetBSD: Makefile,v 1.52 2015/05/05 21:51:09 sjg Exp $
2#
3# Unit tests for make(1)
4# The main targets are:
5#
6# all:	run all the tests
7# test:	run 'all', and compare to expected results
8# accept: move generated output to expected results
9#
10# Adding a test case.
11# Each feature should get its own set of tests in its own suitably
12# named makefile (*.mk), with its own set of expected results (*.exp),
13# and it should be added to the TESTNAMES list.
14#
15
# Default goal: running bare "make" builds (i.e. runs) every test.
16.MAIN: all
17
# Directory this makefile was parsed from; the test makefiles (*.mk) and
# expected outputs (*.exp) live there, and .PATH lets make find them even
# when building in a separate object directory.
18UNIT_TESTS:= ${.PARSEDIR}
19.PATH: ${UNIT_TESTS}
20
21# Each test is in a sub-makefile.
22# Keep the list sorted.
# Each name <t> here yields <t>.rawout/<t>.out build products and is
# compared against ${UNIT_TESTS}/<t>.exp by the "test" target.
23TESTNAMES= \
24	comment \
25	cond1 \
26	cond2 \
27	doterror \
28	dotwait \
29	error \
30	export \
31	export-all \
32	export-env \
33	forloop \
34	forsubst \
35	hash \
36	misc \
37	moderrs \
38	modmatch \
39	modmisc \
40	modorder \
41	modts \
42	modword \
43	order \
44	posix \
45	qequals \
46	sunshcmd \
47	sysv \
48	ternary \
49	unexport \
50	unexport-env \
51	varcmd \
52	varmisc \
53	varshell
54
55# These tests were broken by reverting POSIX changes.
# NOTE(review): STRICT_POSIX_TESTS is not referenced anywhere else in this
# file, so these tests are currently not run at all — presumably parked
# until the strict-POSIX behaviour is restored; confirm before removing.
56STRICT_POSIX_TESTS = \
57	escape \
58	impsrc \
59	phony-end \
60	posix1 \
61	suffixes
62
63# Override make flags for certain tests
# Consumed by the .mk.rawout rule below as ${flags.${.TARGET:R}:U-k}:
# a test with no entry here runs with -k, doterror runs with no flags,
# and order runs with -j1 (presumably for deterministic output — confirm).
64flags.doterror=
65flags.order=-j1
66
# One <test>.out result file per entry in TESTNAMES.
67OUTFILES= ${TESTNAMES:S/$/.out/}
68
# "all" runs every test, leaving the postprocessed output behind.
69all: ${OUTFILES}
70
# Files produced by the test runs; "*.tmp" covers the scratch files of
# both the .mk.rawout and .rawout.out rules, so it appears only once.
71CLEANFILES += *.rawout *.out *.status *.tmp *.core
72CLEANFILES += obj*.[och] lib*.a		# posix1.mk
73CLEANFILES += issue* .[ab]*		# suffixes.mk
74CLEANRECURSIVE += dir dummy		# posix1.mk
75
76clean:
77	rm -f ${CLEANFILES}
# Only attempt the recursive removal when directories are actually listed.
78.if !empty(CLEANRECURSIVE)
79	rm -rf ${CLEANRECURSIVE}
80.endif
81
# The make binary under test: defaults to the make running this file, but
# both knobs are overridable (?=) from the environment or command line.
82TEST_MAKE?= ${.MAKE}
83TOOL_SED?= sed
84
85# ensure consistent results from sort(1)
# Exported so the sub-makes, and every command they spawn, run in the
# C locale — keeping sorted output byte-identical across systems.
86LC_ALL= C
87LANG= C
88.export LANG LC_ALL
89
90# the tests are actually done with sub-makes.
91.SUFFIXES: .mk .rawout .out
# <test>.mk -> <test>.rawout: run the sub-make on the test makefile,
# capturing combined stdout/stderr in the .rawout file and the exit code
# in <test>.status.  The leading '-' tolerates a failing sub-make (tests
# such as "error" and "moderrs" exercise failure paths); writing to
# ${.TARGET}.tmp and renaming on completion keeps an interrupted run from
# leaving a fresh-looking but truncated .rawout behind.
92.mk.rawout:
93	@echo ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC}
94	-@cd ${.OBJDIR} && \
95	{ ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC} \
96	  2>&1 ; echo $$? >${.TARGET:R}.status ; } > ${.TARGET}.tmp
97	@mv ${.TARGET}.tmp ${.TARGET}
98
99# We always pretend .MAKE was called 'make'
100# and strip ${.CURDIR}/ from the output
101# and replace anything after 'stopped in' with unit-tests
102# so the results can be compared.
# The :C/\./\\\./g modifier backslash-escapes each '.' in the substituted
# paths so sed matches them literally.  The exit status recorded by the
# .mk.rawout rule is appended, so the .exp comparison also verifies how
# the sub-make terminated.  Same tmp-then-mv pattern as above.
103.rawout.out:
104	@echo postprocess ${.TARGET}
105	@${TOOL_SED} -e 's,^${TEST_MAKE:T:C/\./\\\./g}[][0-9]*:,make:,' \
106	  -e 's,${TEST_MAKE:C/\./\\\./g},make,' \
107	  -e '/stopped/s, /.*, unit-tests,' \
108	  -e 's,${.CURDIR:C/\./\\\./g}/,,g' \
109	  -e 's,${UNIT_TESTS:C/\./\\\./g}/,,g' \
110	  < ${.IMPSRC} > ${.TARGET}.tmp
111	@echo "exit status `cat ${.TARGET:R}.status`" >> ${.TARGET}.tmp
112	@mv ${.TARGET}.tmp ${.TARGET}
113
114# Compare all output files
# Diff each generated <test>.out against the checked-in <test>.exp.
# The ".PHONY" source keeps this target always out of date.  The loop
# accumulates every failing test name before exiting non-zero (via
# "false"), so a single run reports all failures, not just the first.
115test:	${OUTFILES} .PHONY
116	@failed= ; \
117	for test in ${TESTNAMES}; do \
118	  diff -u ${UNIT_TESTS}/$${test}.exp $${test}.out \
119	  || failed="$${failed}$${failed:+ }$${test}" ; \
120	done ; \
121	if [ -n "$${failed}" ]; then \
122	  echo "Failed tests: $${failed}" ; false ; \
123	else \
124	  echo "All tests passed" ; \
125	fi
126
# Bless the current output: copy each <test>.out over its <test>.exp.
# "cmp -s" skips files that are already identical, so only tests whose
# output actually changed get replaced (and announced).
# NOTE(review): unlike "test", this target does not depend on ${OUTFILES},
# so the .out files must already exist — run "make" or "make test" first.
127accept:
128	@for test in ${TESTNAMES}; do \
129	  cmp -s ${UNIT_TESTS}/$${test}.exp $${test}.out \
130	  || { echo "Replacing $${test}.exp" ; \
131	       cp $${test}.out ${UNIT_TESTS}/$${test}.exp ; } \
132	done
133
# Re-run every test whenever the make binary under test itself changes;
# guarded by exists() since TEST_MAKE may not name a stat-able file
# (e.g. when overridden to a bare command name found via $PATH).
134.if exists(${TEST_MAKE})
135${TESTNAMES:S/$/.rawout/}: ${TEST_MAKE}
136.endif
137
# ".-include" ignores a missing file; bsd.obj.mk presumably provides the
# object-directory (.OBJDIR) handling — see the NetBSD mk-files.
138.-include <bsd.obj.mk>
139