/*	$NetBSD: cache_r4k.c,v 1.10 2005/12/24 20:07:19 perry Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_r4k.c,v 1.10 2005/12/24 20:07:19 perry Exp $");

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */
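
/*
 * The routines below are grouped by cache line size: 16-byte and
 * 32-byte variants for the primary instruction (CACHE_R4K_I) and
 * data (CACHE_R4K_D) caches, and 32-byte, 128-byte, and generic
 * variants for the secondary cache (CACHE_R4K_SD).  Within each
 * group, the *_all routines walk the whole cache with Index ops,
 * the *_range routines operate on a virtual address range with Hit
 * ops, and the *_range_index routines cover a range with Index ops
 * for cases where the original mapping may no longer be accessible.
 * The *_wbinv_* routines write back and invalidate, the *_inv_*
 * routines invalidate only, and the *_wb_* routines write back only.
 */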
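/*
 * round_line()/trunc_line() widen an address range outward to whole
 * 16-byte cache lines; e.g. a request covering [0x1234, 0x1247)
 * becomes [0x1230, 0x1250), so every byte of the range falls within
 * a line that is operated on.
 */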
#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)

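/*
 * The cache op macros and the "sync" barriers below expand to MIPS III
 * ("cache" and "sync") instructions; this directive tells the assembler
 * to accept them regardless of the ISA level the file is otherwise
 * compiled for.
 */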
__asm(".set mips3");

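/*
 * r4k_icache_sync_all_16:
 *
 *	Invalidate the entire primary instruction cache (16-byte lines)
 *	with Index Invalidate ops, starting from a KSEG0 base address.
 *	The primary data cache is written back and invalidated first,
 *	and the "sync" lets those writes complete, so subsequent
 *	instruction fetches see up-to-date memory.  Each
 *	cache_r4k_op_32lines_16() pass covers 32 lines (512 bytes).
 */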
void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

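/*
 * r4k_icache_sync_range_16:
 *
 *	Sync a virtual address range of the primary instruction cache
 *	with Hit Invalidate ops after writing back the corresponding
 *	data cache lines.  The first loop handles 512 bytes (32 lines)
 *	per pass; the second finishes the remainder one line at a time.
 */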
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

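/*
 * r4k_icache_sync_range_index_16:
 *
 *	As above, but with Index ops, for callers whose mapping for the
 *	range may no longer be usable.  The data cache is flushed by
 *	index first; the instruction cache address is then rebuilt from
 *	the index bits of the original VA (see the comment below).
 */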
void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

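/*
 * r4k_pdcache_wbinv_all_16:
 *
 *	Write back and invalidate the entire primary data cache
 *	(16-byte lines) by walking it with Index Writeback Invalidate
 *	ops from a KSEG0 base address.
 */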
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

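/*
 * The primary data cache range routines that follow come in three
 * flavours: *_wbinv_range writes dirty lines back and invalidates
 * them, *_inv_range invalidates without writing back (useful, for
 * example, before DMA deposits fresh data in a buffer), and
 * *_wb_range writes back without invalidating.  The *_range_index
 * variant again uses Index ops for when the mapping may be gone.
 */
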
void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

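/*
 * r4k_pdcache_wbinv_range_index_16:
 *
 *	Index variant of the above.  Because the primary data cache is
 *	direct-mapped, masking the VA with (mips_pdcache_size - 1)
 *	yields the cache index, which is then turned back into a
 *	usable KSEG0 address.
 */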
void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)
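
/*
 * The same primary cache operations, repeated for CPUs with 32-byte
 * cache lines; one unrolled cache_r4k_op_32lines_32() pass now covers
 * 32 * 32 = 1024 bytes.
 */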

void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

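/*
 * Secondary cache (CACHE_R4K_SD) operations for 32-byte lines.  These
 * mirror the primary data cache routines above.
 */
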
void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)
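
/*
 * Secondary cache operations for 128-byte lines; one unrolled
 * cache_r4k_op_32lines_128() pass covers 32 * 128 = 4096 bytes.
 */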

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))
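
/*
 * Generic secondary cache operations for any other line size.  The
 * line size is read from mips_sdcache_line_size at run time, so these
 * walk one line per loop iteration instead of using the unrolled
 * 32-line helpers.
 */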

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;
	int line_size = mips_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line
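
/*
 * Illustrative note (not part of the original source): the machine-
 * dependent cache configuration code is expected to pick among the
 * routines above according to the probed line sizes and install them
 * in the kernel's cache operation vector, roughly along these lines:
 *
 *	switch (mips_picache_line_size) {
 *	case 16:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_16;
 *		break;
 *	case 32:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_32;
 *		break;
 *	}
 *
 * The structure and field names in this sketch are assumptions made
 * for illustration; see mips/cache.h and mips/cache.c for the actual
 * selection logic.
 */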