/*	$OpenBSD: isadma_bounce.c,v 1.13 2019/05/13 21:27:59 mpi Exp $	*/
/* $NetBSD: isadma_bounce.c,v 1.3 2000/06/29 09:02:57 mrg Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern	paddr_t avail_end;

/*
 * ISA can only DMA to 0-16M.
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map.
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4

int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Create an ISA DMA map.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;
	map->_dm_cookiesize = 0;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * (A usage sketch for this interface follows this function.)
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF, (flags & BUS_DMA_NOWAIT)
	    ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	map->_dm_cookiesize = cookiesize;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		free(map->_dm_cookie, M_DEVBUF, cookiesize);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
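
/*
 * Illustrative sketch (not part of the driver code): how an ISA device
 * driver might create a DMA map that ends up serviced by the routine
 * above.  This is a hedged example; "xx_softc", "sc_dmat", "sc_xfermap"
 * and the sizes are hypothetical, and it assumes the platform has
 * installed the isadma_bounce_* routines as the map methods of the ISA
 * bus_dma tag handed to the driver, so the driver only ever calls the
 * generic bus_dmamap_* interface.
 */
#if 0
struct xx_softc {
	struct device	sc_dev;		/* base device (hypothetical) */
	bus_dma_tag_t	sc_dmat;	/* ISA DMA tag from attach args */
	bus_dmamap_t	sc_xfermap;	/* map for data transfers */
};

int
xx_dma_setup(struct xx_softc *sc)
{
	/*
	 * One segment of at most 64KB.  BUS_DMA_ALLOCNOW asks
	 * isadma_bounce_dmamap_create() to reserve bounce pages up
	 * front, so a later load cannot fail for lack of low memory.
	 */
	return (bus_dmamap_create(sc->sc_dmat, 65536, 1, 65536, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_xfermap));
}
#endif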

/*
 * Destroy an ISA DMA map.
 */
void
isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		isadma_bounce_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF, map->_dm_cookiesize);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}

/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.  (A transmit-path
 * sketch using this entry point follows the function.)
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}
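
/*
 * Illustrative sketch (not part of the driver code): a transmit path
 * using bus_dmamap_load_mbuf() against a map created as in the earlier
 * sketch.  "sc", "sc_dmat", "sc_xfermap" and xx_start_dma() are
 * hypothetical.  If the mbuf chain sits above 16M, the routine above
 * silently redirects the map at the bounce buffer, and the PREWRITE
 * sync below is what copies the mbuf data into it; the matching
 * POSTWRITE sync and bus_dmamap_unload() belong in the completion path.
 */
#if 0
int
xx_tx(struct xx_softc *sc, struct mbuf *m0)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_xfermap, m0,
	    BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Copy into the bounce buffer (if bouncing) before the DMA. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_xfermap, 0,
	    sc->sc_xfermap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	xx_start_dma(sc);	/* hypothetical: program the device */
	return (0);
}
#endif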

/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}

/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		isadma_bounce_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.  (A read-transfer
	 * sketch showing the intended sync ordering follows this function.)
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		panic("isadma_bounce_dmamap_sync: unknown buffer type %d",
		    cookie->id_buftype);
	}

	/* Drain the write buffer. */
	alpha_mb();
}
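
/*
 * Illustrative sketch (not part of the driver code): the sync ordering
 * for a device-to-memory (read) transfer on a linear buffer.  "sc",
 * "sc_dmat", "sc_xfermap" and xx_start_dma() are hypothetical.  With a
 * bouncing map, the BUS_DMASYNC_POSTREAD sync in the completion path is
 * what copies the bounce pages back into the caller's buffer, so
 * skipping it would leave that buffer stale.
 */
#if 0
void
xx_start_read(struct xx_softc *sc, void *buf, bus_size_t len)
{
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_xfermap, buf, len,
	    NULL, BUS_DMA_NOWAIT) != 0)
		return;		/* error handling omitted for brevity */

	/* Before the device starts writing into memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_xfermap, 0, len,
	    BUS_DMASYNC_PREREAD);
	xx_start_dma(sc);	/* hypothetical */
}

void
xx_read_done(struct xx_softc *sc, bus_size_t len)
{
	/* After the device is done; copies bounce pages back if bouncing. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_xfermap, 0, len,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_xfermap);
}
#endif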

/*
 * Allocate memory safe for ISA DMA.  (An allocation sketch using this
 * entry point follows the function.)
 */
int
isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
	return (error);
}
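
/*
 * Illustrative sketch (not part of the driver code): allocating a
 * descriptor area with bus_dmamem_alloc()/bus_dmamem_map() through this
 * tag.  The routine above tries the ISA-reachable range first, so
 * memory that comes back below 16M never needs to bounce; and since
 * dmamap_load_raw() is not implemented here, the mapped kernel virtual
 * address is loaded as an ordinary linear buffer.  "sc", "sc_dmat",
 * "sc_xfermap" and the sizes are hypothetical.
 */
#if 0
int
xx_alloc_descs(struct xx_softc *sc, bus_size_t size)
{
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}
	/* Load the mapped virtual address like any linear buffer. */
	return (bus_dmamap_load(sc->sc_dmat, sc->sc_xfermap, kva, size,
	    NULL, BUS_DMA_NOWAIT));
}
#endif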

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}

void
isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}