/* $Id$
 * --------------------------------------------------------------------------
 *
 *           //=====   //===== ===//=== //===//  //       //   //===//
 *          //        //         //    //    // //       //   //    //
 *         //====//  //         //    //===//  //       //   //===<<
 *              //  //         //    //       //       //   //    //
 *       ======//  //=====    //    //       //=====  //   //===//
 *
 * -------------- An SCTP implementation according to RFC 4960 --------------
 *
 * Copyright (C) 2000 by Siemens AG, Munich, Germany.
 * Copyright (C) 2001-2004 Andreas Jungmaier
 * Copyright (C) 2004-2019 Thomas Dreibholz
 *
 * Acknowledgements:
 * Realized in co-operation between Siemens AG and the University of
 * Duisburg-Essen, Institute for Experimental Mathematics, Computer
 * Networking Technology group.
 * This work was partially funded by the Bundesministerium fuer Bildung und
 * Forschung (BMBF) of the Federal Republic of Germany
 * (Förderkennzeichen 01AK045).
 * The authors alone are responsible for the contents.
 *
 * This library is free software: you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Contact: sctp-discussion@sctp.de
 *          dreibh@iem.uni-due.de
 *          tuexen@fh-muenster.de
 *          andreas.jungmaier@web.de
 */

#include "adaptation.h"
#include "reltransfer.h"
#include "flowcontrol.h"
#include "recvctrl.h"
#include "pathmanagement.h"
#include "distribution.h"
#include "SCTP-control.h"
#include "bundling.h"

#include <string.h>
#include <stdio.h>

#define MAX_NUM_OF_CHUNKS   500

static chunk_data *rtx_chunks[MAX_NUM_OF_CHUNKS];

/* #define Current_event_log_ 6 */

/**
 * This struct contains all necessary data for retransmissions
 * and for the processing of received SACKs.
 */
typedef struct rtx_buffer_struct
{
    /*@{ */
    /** storing the lowest tsn that is in the list */
    unsigned int lowest_tsn;
    /** the highest tsn that has been queued for transmission so far */
    unsigned int highest_tsn;
    /** number of chunks currently stored in the list */
    unsigned int num_of_chunks;
    /** the highest cumulative tsn acked (ctsna) received so far */
    unsigned int highest_acked;
    /** a list that is ordered by ascending tsn values */
    GList *chunk_list;
    /** arrival time of the most recently received SACK */
    struct timeval sack_arrival_time;
    /** send time of the chunk used for the current RTT measurement */
    struct timeval saved_send_time;
    /** stores 1 if a chunk that was transmitted only once has been newly acked
        (i.e. a valid RTT measurement is possible), else 0 */
    unsigned int save_num_of_txm;
    /** number of bytes newly acknowledged by the current SACK */
    unsigned int newly_acked_bytes;
    /** number of destination addresses (paths) of this association */
    unsigned int num_of_addresses;
    /** ID of the association this structure belongs to */
    unsigned int my_association;
    /** the receiver window most recently advertised by the peer */
    unsigned int peer_arwnd;
    /** TRUE as long as no queued chunk has been acked by a gap report */
    gboolean all_chunks_are_unacked;
    /** TRUE after the ULP has requested a shutdown of this association */
    gboolean shutdown_received;
    /** TRUE while fast recovery is active */
    gboolean fast_recovery_active;
    /** the exit point is only valid, if we are in fast recovery */
    unsigned int fr_exit_point;
    /** the PR-SCTP Advanced.Peer.Ack.Point (see RFC 3758) */
    unsigned int advancedPeerAckPoint;
    /** TSN announced in the last FORWARD TSN chunk that was sent */
    unsigned int lastSentForwardTSN;
    /** the last cumulative tsn acked (ctsna) received from the peer */
    unsigned int lastReceivedCTSNA;

    /** (stream id, stream sn) pairs to be reported in the next FORWARD TSN chunk */
    GArray *prChunks;
/*@} */
} rtx_buffer;


/**
 * After submitting the results of a SACK to flow control, the counters in
 * reliable transfer must be reset.
 * @param rtx   pointer to a rtx_buffer whose newly-acked byte counter is reset to 0
 */
void rtx_reset_bytecounters(rtx_buffer * rtx)
{
    rtx->newly_acked_bytes = 0L;
    return;
}


/**
 * Function creates and allocates a new rtx_buffer structure.
 * There is one such structure per established association.
 * @param   number_of_destination_addresses     number of paths to the peer of the association
 * @param   iTSN                                initial TSN of this association
 * @return pointer to the newly created structure
 */
void *rtx_new_reltransfer(unsigned int number_of_destination_addresses, unsigned int iTSN)
{
    rtx_buffer *tmp;

    tmp = (rtx_buffer*)malloc(sizeof(rtx_buffer));
    if (!tmp)
        error_log(ERROR_FATAL, "Malloc failed");

    event_logi(VVERBOSE,
               "================== Reltransfer: number_of_destination_addresses = %d",
               number_of_destination_addresses);

    tmp->chunk_list = NULL;

    tmp->lowest_tsn = iTSN - 1;
    tmp->highest_tsn = iTSN - 1;
    tmp->lastSentForwardTSN = iTSN - 1;
    tmp->highest_acked = iTSN - 1;
    tmp->lastReceivedCTSNA = iTSN - 1;
    tmp->newly_acked_bytes = 0L;
    tmp->num_of_chunks = 0L;
    tmp->save_num_of_txm = 0L;
    tmp->peer_arwnd = 0L;
    tmp->shutdown_received = FALSE;
    tmp->fast_recovery_active = FALSE;
    tmp->all_chunks_are_unacked = TRUE;
    tmp->fr_exit_point = 0L;
    tmp->num_of_addresses = number_of_destination_addresses;
    tmp->advancedPeerAckPoint = iTSN - 1;   /* a safe bet */
    tmp->prChunks = g_array_new(FALSE, TRUE, sizeof(pr_stream_data));
    tmp->my_association = mdi_readAssociationID();
    event_logi(VVERBOSE, "RTX : Association-ID== %d ", tmp->my_association);
    if (tmp->my_association == 0)
        error_log(ERROR_FATAL, "Association was not set, should be......");
    rtx_reset_bytecounters(tmp);
    return (tmp);
}
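
/*
 * Illustrative sketch (added for documentation, not part of the original
 * library code): the intended lifecycle of an rtx_buffer instance as seen
 * from per-association setup and teardown. The function and variable names
 * (example_rtx_lifecycle, paths, initial_tsn) are hypothetical.
 */
#if 0
static void example_rtx_lifecycle(unsigned int paths, unsigned int initial_tsn)
{
    /* one instance is created per established association */
    void *rtx = rtx_new_reltransfer(paths, initial_tsn);

    /* ... outgoing chunks are stored with rtx_save_retrans_chunks(),
       incoming SACKs are handled by rtx_process_sack(), and timer-based
       retransmissions are collected via rtx_t3_timeout() ... */

    /* the instance is destroyed when the association is torn down */
    rtx_delete_reltransfer(rtx);
}
#endif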

/**
 * Function deletes a rtx_buffer structure (when it is not needed anymore).
 * @param rtx_instance pointer to a rtx_buffer that was previously created
 *          with rtx_new_reltransfer()
 */
void rtx_delete_reltransfer(void *rtx_instance)
{
    rtx_buffer *rtx;
    rtx = (rtx_buffer *) rtx_instance;
    event_log(INTERNAL_EVENT_0, "deleting reliable transfer");
    if (rtx->chunk_list != NULL)
        error_log(ERROR_MINOR, "List is being deleted, but chunks are still queued...");

    g_list_foreach(rtx->chunk_list, &free_list_element, GINT_TO_POINTER(2));
    g_list_free(rtx->chunk_list);
    g_array_free(rtx->prChunks, TRUE);

    free(rtx_instance);
}


/**
 * Helper function that calls pm_chunksAcked() and thereby tells path management
 * whether new chunks have been acked, so that a new RTT may be estimated.
 * @param  adr_idx  CHECKME : address where chunks have been acked (is this correct ?);
 *          may we take the source address of the SACK, or must we take the destination address of our data ?
 * @param    rtx    pointer to the currently active rtx structure
 */
void rtx_rtt_update(unsigned int adr_idx, rtx_buffer * rtx)
{
    /* FIXME : check this routine !!!!!!!!!!! */
    int rtt;
    event_logi(INTERNAL_EVENT_0, "rtx_rtt_update(address=%u)... ", adr_idx);
    if (rtx->save_num_of_txm == 1) {
        rtx->save_num_of_txm = 0;
        rtt = adl_timediff_to_msecs(&(rtx->sack_arrival_time), &(rtx->saved_send_time));
        if (rtt != -1) {
            event_logii(ERROR_MINOR, "Calling pm_chunksAcked(%u, %d)...", adr_idx, rtt);
            pm_chunksAcked((short)adr_idx, (unsigned int)rtt);
        }
    } else {
        event_logi(VERBOSE, "Calling pm_chunksAcked(%u, 0)...", adr_idx);
        pm_chunksAcked((short)adr_idx, (unsigned int)0L);
    }
    return;
}


/**
 * This function enters fast recovery and sets the correct exit point,
 * iff fast recovery is not already active.
 */
int rtx_enter_fast_recovery(void)
{
    rtx_buffer *rtx = NULL;
    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (SCTP_MODULE_NOT_FOUND);
    }

    if (rtx->fast_recovery_active == FALSE) {
        event_logi(INTERNAL_EVENT_0, "=============> Entering FAST RECOVERY !!!, Exit Point: %u <================", rtx->highest_tsn);
        rtx->fast_recovery_active = TRUE;
        rtx->fr_exit_point = rtx->highest_tsn;
    }
    return SCTP_SUCCESS;
}


/**
 * this function leaves fast recovery if it was activated, and all chunks up to
 * fast recovery exit point were acknowledged.
 */
static inline int rtx_check_fast_recovery(rtx_buffer* rtx, unsigned int ctsna)
{
    if (rtx->fast_recovery_active == TRUE) {
        if (after(ctsna, rtx->fr_exit_point) || ctsna == rtx->fr_exit_point) {
            event_logi(INTERNAL_EVENT_0, "=============> Leaving FAST RECOVERY !!! CTSNA: %u <================", ctsna);
            rtx->fast_recovery_active = FALSE;
            rtx->fr_exit_point = 0;
        }
    }
    return SCTP_SUCCESS;
}
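
/*
 * Worked example (documentation only, derived from the two functions above):
 * if the highest queued TSN is 1000 when fast recovery is entered, then
 * fr_exit_point becomes 1000; fast recovery is left again by the first SACK
 * whose cumulative TSN ack reaches or passes 1000.
 */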

/**
 * This function returns TRUE if fast recovery is active,
 * else it returns FALSE.
 */
gboolean rtx_is_in_fast_recovery(void)
{
    rtx_buffer *rtx = NULL;
    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (FALSE);
    }
    return rtx->fast_recovery_active;
}


/**
 * Function removes chunks up to ctsna from the list and updates the newly acked bytes.
 * @param   ctsna       the cumulative tsn ack value that has just been received in a SACK
 * @param   addr_index  index of the address the SACK was received on (used for RTT bookkeeping)
 * @return -1 on error (e.g. empty retransmission list), 0 on success
 */
int rtx_dequeue_up_to(unsigned int ctsna, unsigned int addr_index)
{
    rtx_buffer *rtx;
    chunk_data *dat, *old_dat;
/*
    boolean deleted_chunk = FALSE;
    guint i=0, list_length = 0;
*/
    unsigned int chunk_tsn;
    GList* tmp;

    event_logi(INTERNAL_EVENT_0, "rtx_dequeue_up_to...%u ", ctsna);

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (-1);
    }
    if (rtx->chunk_list == NULL) {
        event_log(INTERNAL_EVENT_0, "List is NULL in rtx_dequeue_up_to()");
        return -1;
    }

    /* first remove all stale chunks from flowcontrol list           */
    /* so that these are not referenced after they are freed here    */
    fc_dequeue_acked_chunks(ctsna);

    tmp = g_list_first(rtx->chunk_list);

    while (tmp) {
        dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, 0);
        if (!dat) return -1;

        chunk_tsn = dat->chunk_tsn;

        event_logiiii(VVERBOSE,
                      " dat->num_of_transmissions==%u, chunk_tsn==%u, chunk_len=%u, ctsna==%u ",
                      dat->num_of_transmissions, chunk_tsn, dat->chunk_len, ctsna);

        tmp = g_list_next(tmp);

        if (before(dat->chunk_tsn, ctsna) || (dat->chunk_tsn == ctsna)) {

            if (dat->num_of_transmissions < 1)
                error_log(ERROR_FATAL, "Somehow dat->num_of_transmissions is less than 1 !");

            if (dat->hasBeenAcked == FALSE && dat->hasBeenDropped == FALSE) {
                rtx->newly_acked_bytes += dat->chunk_len;
                dat->hasBeenAcked = TRUE;
                if (dat->num_of_transmissions == 1 && addr_index == dat->last_destination) {
                    rtx->save_num_of_txm = 1;
                    rtx->saved_send_time = dat->transmission_time;
                    event_logiii(VERBOSE,
                                 "Saving Time (after dequeue) : %lu secs, %06lu usecs for tsn=%u",
                                 dat->transmission_time.tv_sec,
                                 dat->transmission_time.tv_usec, dat->chunk_tsn);
                }
            }

            event_logi(INTERNAL_EVENT_0, "Now delete chunk with tsn...%u", chunk_tsn);
            old_dat = dat;
            rtx->chunk_list = g_list_remove(rtx->chunk_list, (gpointer)dat);
            free(old_dat);
        }
        /* it is a sorted list, so it is safe to get out in this case */
        if (after(chunk_tsn, ctsna))
            break;

    }
    return 0;
}
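
/*
 * Note on the before()/after()/between() checks used above and throughout this
 * file: TSNs are compared as 32-bit serial numbers, i.e. modulo 2^32, so that
 * the comparisons also work across a TSN wrap-around. A minimal sketch of this
 * kind of comparison is shown below for illustration only; the macros actually
 * used by the library are defined in its headers and may differ in detail.
 */
#if 0
static int example_tsn_after(unsigned int a, unsigned int b)
{
    /* non-zero if 'a' is "later" than 'b' in mod-2^32 serial-number order */
    return ((int) (a - b)) > 0;
}

static int example_tsn_before(unsigned int a, unsigned int b)
{
    return ((int) (a - b)) < 0;
}
#endif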


/**
 * Helper that records the (stream id, stream sequence number) pair of an
 * abandoned chunk in the sorted prChunks array, which is later used to build
 * the variable part of a FORWARD TSN chunk.
 */
static int rtx_update_fwtsn_list(rtx_buffer *rtx, chunk_data* dat)
{
    int count = 0, result = 0, arrayLen = 0;
    pr_stream_data prChunk, *prPtr=NULL;
    SCTP_data_chunk* prChunkData = (SCTP_data_chunk*)dat->data;

    prChunk.stream_id = ntohs(prChunkData->stream_id);
    prChunk.stream_sn = ntohs(prChunkData->stream_sn);

    arrayLen = rtx->prChunks->len;
    if (arrayLen == 0) {
        rtx->prChunks = g_array_append_val(rtx->prChunks, prChunk);
        result = 2;
    } else {
        for (count = 0; count < arrayLen; count++) {
            prPtr = &g_array_index(rtx->prChunks, pr_stream_data, count);

            if (prChunk.stream_id < prPtr->stream_id) {    /* prepend */
                rtx->prChunks = g_array_insert_val(rtx->prChunks, count, prChunk);
                event_logii(VVERBOSE, "FW-TSN: prepended chunk (SID=%u, SSN=%u)", prChunk.stream_id, prChunk.stream_sn);
                result = -1;
                break;
            } else if (prChunk.stream_id == prPtr->stream_id) {   /* check/replace */
                if (sAfter(prChunk.stream_sn, prPtr->stream_sn)) {
                    event_logii(VVERBOSE, "FW-TSN: replaced chunk (SID=%u, SSN=%u)", prChunk.stream_id, prChunk.stream_sn);
                    prPtr->stream_sn = prChunk.stream_sn;
                    result = 0;
                    break;
                } else {
                    result = -2;
                    break;
                }
            } else if (count == arrayLen - 1) { /* append */
                rtx->prChunks = g_array_insert_val(rtx->prChunks, count+1, prChunk);
                event_logii(VVERBOSE, "FW-TSN: appended chunk (SID=%u, SSN=%u)", prChunk.stream_id, prChunk.stream_sn);
                result = 1;
                break;
            }
        }
    }
    event_logiiii(VERBOSE, "Scheduling Chunk (TSN=%u, SID=%u, SSN=%u) for FW-TSN Report, Result: %d",
                  dat->chunk_tsn, prChunk.stream_id, prChunk.stream_sn, result);
    return result;
}


/**
 * Advances the PR-SCTP Advanced.Peer.Ack.Point over all chunks at the head of
 * the queue that have been abandoned (dropped), and rebuilds the list of
 * (stream id, stream sn) pairs to be reported in the next FORWARD TSN chunk.
 */
static int rtx_advancePeerAckPoint(rtx_buffer *rtx)
{
    chunk_data *dat = NULL;
    GList* tmp = NULL;

    /* restart with a fresh array */
    g_array_free(rtx->prChunks, TRUE);
    rtx->prChunks = g_array_new(FALSE, TRUE, sizeof(pr_stream_data));

    tmp = g_list_first(rtx->chunk_list);

    while (tmp) {
        dat = (chunk_data*)g_list_nth_data(tmp, 0);
        if (!dat) return -1;
        if (!dat->hasBeenDropped) return 0;
        event_logi(VVERBOSE, "rtx_advancePeerAckPoint: Set advancedPeerAckPoint to %u", dat->chunk_tsn);
        rtx->advancedPeerAckPoint = dat->chunk_tsn;
        rtx_update_fwtsn_list(rtx, dat);
        tmp = g_list_next(tmp);
    }
    return 0;
}


/**
 * Builds a FORWARD TSN chunk (see RFC 3758) from the current prChunks list and
 * hands it to bundling.
 * @param rtx          pointer to the currently active rtx structure
 * @param forward_tsn  the new cumulative TSN to be announced to the peer
 * @param idx          index of the destination address to send the chunk to
 * @param sendAtOnce   if TRUE, the bundled chunks are sent out immediately
 */
int rtx_send_forward_tsn(rtx_buffer *rtx, unsigned int forward_tsn, unsigned int idx, gboolean sendAtOnce)
{
    int result;
    unsigned int count;

    SCTP_forward_tsn_chunk chk;
    pr_stream_data * psd;
    pr_stream_data   hton_psd;
    for (count = 0; count < rtx->prChunks->len; count++) {
        psd =  &g_array_index(rtx->prChunks, pr_stream_data, count);
        event_logii(VVERBOSE, "rtx_send_forward_tsn: chunk SID=%u, SSN=%u", psd->stream_id, psd->stream_sn);
        hton_psd.stream_id = htons(psd->stream_id);
        hton_psd.stream_sn = htons(psd->stream_sn);
        memcpy(&(chk.variableParams[count*sizeof(pr_stream_data)]), &hton_psd, sizeof(pr_stream_data));
    }
    chk.forward_tsn                = htonl(forward_tsn);
    chk.chunk_header.chunk_id      = CHUNK_FORWARD_TSN;
    chk.chunk_header.chunk_flags   = 0;
    chk.chunk_header.chunk_length  = htons((unsigned short)(sizeof(SCTP_chunk_header)+
                                           sizeof(unsigned int)+
                                           rtx->prChunks->len*sizeof(pr_stream_data)));

    event_logi(INTERNAL_EVENT_0, "===================>  Sending FORWARD TSN : %u", forward_tsn);

    result = bu_put_Ctrl_Chunk((SCTP_simple_chunk *) &chk, &idx);
    if (sendAtOnce == TRUE) {
        result = bu_sendAllChunks(&idx);
    }
    rtx->lastSentForwardTSN = forward_tsn;
    return result;
}
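
/*
 * For reference, the on-the-wire layout of the FORWARD TSN chunk built above,
 * as defined in RFC 3758 (reproduced here as documentation only):
 *
 *    0                   1                   2                   3
 *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |   Type = 192  |  Flags = 0x00 |        Length = Variable      |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |                      New Cumulative TSN                       |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |         Stream-1              |       Stream Sequence-1       |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |            ...                |            ...                |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |         Stream-N              |       Stream Sequence-N       |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */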


/**
 * Computes the number of outstanding bytes queued for one particular address,
 * as well as the total number of outstanding bytes (over all addresses).
 * Chunks that have been acked, dropped, or requeued by the T3 timer are not counted.
 * @param adIndex        index of the address we are interested in
 * @param totalInFlight  output: total number of outstanding bytes of this association
 * @return number of outstanding bytes for the given address, or an error code
 */
int rtx_get_obpa(unsigned int adIndex, unsigned int *totalInFlight)
{
    rtx_buffer *rtx=NULL;
    chunk_data *dat=NULL;
    int count, len, numBytesPerAddress = 0, numTotalBytes = 0;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_FATAL, "rtx_buffer instance not set !");
        return SCTP_MODULE_NOT_FOUND;
    }
    len = g_list_length(rtx->chunk_list);
    if (len == 0) {
        *totalInFlight = 0;
        return 0;
    }
    for (count = 0; count < len; count++) {
        dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, count);
        if (dat == NULL) break;
        /* do not count chunks that were retransmitted by the T3 timer          */
        /* dat->hasBeenRequeued will be set to FALSE when these are sent again  */
        if (!dat->hasBeenDropped && !dat->hasBeenAcked && !dat->hasBeenRequeued) {
            if (dat->last_destination == adIndex) {
                numBytesPerAddress += dat->chunk_len;
            }
            numTotalBytes += dat->chunk_len;
        }
    }
    *totalInFlight = numTotalBytes;
    return numBytesPerAddress;
}

/**
 * This is called by bundling when a SACK needs to be processed. This is a LONG function !
 * FIXME : check correct update of rtx->lowest_tsn !
 * FIXME : handling of out-of-order SACKs
 * CHECK : if the SACK acked the lowest outstanding tsn, restart the t3 timer (section 7.2.4.4)
 * @param  adr_index   index of the address where we got that sack
 * @param  sack_chunk  pointer to the sack chunk
 * @param  totalLen    total length of the received data, used for a basic sanity check
 * @return -1 on error, 0 if okay.
 */
int rtx_process_sack(unsigned int adr_index, void *sack_chunk, unsigned int totalLen)
{
    rtx_buffer *rtx=NULL;
    SCTP_sack_chunk *sack=NULL;
    fragment *frag=NULL;
    chunk_data *dat=NULL;
    GList* tmp_list = NULL;
    int result;
    unsigned int advertised_rwnd, old_own_ctsna;
    unsigned int low, hi, ctsna, pos;
    unsigned int chunk_len, var_len, gap_len, dup_len;
    unsigned int num_of_dups, num_of_gaps;
    unsigned int max_rtx_arraysize;
    unsigned int retransmitted_bytes = 0L;
    int chunks_to_rtx = 0;
    guint i=0;
    boolean rtx_necessary = FALSE, all_acked = FALSE, new_acked = FALSE;

    event_logi(INTERNAL_EVENT_0, "rtx_process_sack(address==%u)", adr_index);

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (-1);
    }

    /*      chunk_list_debug(rtx->chunk_list); */

    sack = (SCTP_sack_chunk *) sack_chunk;
    ctsna = ntohl(sack->cumulative_tsn_ack);

    /* discard old SACKs */
    if (before(ctsna, rtx->highest_acked)) return 0;
    else rtx->highest_acked = ctsna;

    rtx->lastReceivedCTSNA = ctsna;

    old_own_ctsna = rtx->lowest_tsn;
    event_logii(VERBOSE, "Received ctsna==%u, old_own_ctsna==%u", ctsna, old_own_ctsna);

    adl_gettime(&(rtx->sack_arrival_time));

    event_logii(VERBOSE, "SACK Arrival Time : %lu secs, %06lu usecs",
                rtx->sack_arrival_time.tv_sec, rtx->sack_arrival_time.tv_usec);

    /* a false value here may do evil things !!!!! */
    chunk_len = ntohs(sack->chunk_header.chunk_length);
    /* this is just a very basic safety check */
    if (chunk_len > totalLen) return -1;

    rtx_check_fast_recovery(rtx, ctsna);

    /* maybe add some more sanity checks  !!! */
    advertised_rwnd = ntohl(sack->a_rwnd);
    num_of_gaps = ntohs(sack->num_of_fragments);
    num_of_dups = ntohs(sack->num_of_duplicates);
    /* var_len contains gap acks AND duplicates ! Thanks to Janar for pointing this out */
    var_len = chunk_len - sizeof(SCTP_chunk_header) - 2 * sizeof(unsigned int) - 2 * sizeof(unsigned short);
    gap_len = num_of_gaps * sizeof(unsigned int);
    dup_len = num_of_dups * sizeof(unsigned int);
    if (var_len != gap_len+dup_len) {
        event_logiiii(EXTERNAL_EVENT, "Drop SACK chunk (incorrect length fields) chunk_len=%u, var_len=%u, gap_len=%u, dup_len=%u",
                     chunk_len, var_len, gap_len, dup_len);
        return -1;
    }
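
    /*
     * Worked example for the length check above (assuming the usual 4-byte
     * unsigned int and 2-byte unsigned short): the fixed part of a SACK is
     * 4 (chunk header) + 4 (cumulative TSN ack) + 4 (a_rwnd) + 2 + 2 = 16 bytes.
     * A SACK carrying 2 gap ack blocks and 1 duplicate TSN therefore has
     * chunk_len = 16 + 2*4 + 1*4 = 28, and var_len = 12 = gap_len + dup_len.
     */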

    event_logiiiii(VVERBOSE, "chunk_len=%u, a_rwnd=%u, var_len=%u, gap_len=%u, dup_len=%u",
                    chunk_len, advertised_rwnd, var_len, gap_len, dup_len);

    if (after(ctsna, rtx->lowest_tsn) || (ctsna == rtx->lowest_tsn)) {
        event_logiii(VVERBOSE, "after(%u, %u) == true, call rtx_dequeue_up_to(%u)",
                     ctsna, rtx->lowest_tsn, ctsna);
        result = rtx_dequeue_up_to(ctsna, adr_index);
        if (result < 0) {
            event_log(EXTERNAL_EVENT_X,
                      "Bad ctsna arrived in SACK or no data in queue - discarding SACK");
            return -1;
        }
        rtx->lowest_tsn = ctsna;
        event_logi(VVERBOSE, "Updated rtx->lowest_tsn==ctsna==%u", ctsna);
    }

    chunk_list_debug(VVERBOSE, rtx->chunk_list);

    if (ntohs(sack->num_of_fragments) != 0) {
        event_logi(VERBOSE, "Processing %u fragment reports", ntohs(sack->num_of_fragments));
        max_rtx_arraysize = g_list_length(rtx->chunk_list);
        if (max_rtx_arraysize == 0) {
            /*rxc_send_sack_everytime(); */
            event_log(VERBOSE,
                      "Size of retransmission list was zero, we received fragment report -> ignore");
        } else {
            /* this may become expensive !!!!!!!!!!!!!!!! */
            pos = 0;
            dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, i);
            if (rtx->chunk_list != NULL && dat != NULL) {
                do {
                    frag = (fragment *) &(sack->fragments_and_dups[pos]);
                    low = ctsna + ntohs(frag->start);
                    hi = ctsna + ntohs(frag->stop);
                    event_logiii(VVERBOSE, "chunk_tsn==%u, lo==%u, hi==%u", dat->chunk_tsn, low, hi);
                    if (after(dat->chunk_tsn, hi)) {
                        event_logii(VVERBOSE, "after(%u,%u)==true", dat->chunk_tsn, hi);
                        pos += sizeof(fragment);
                        if (pos >= gap_len)
                            break;
                        continue;
                    }
                    if (before(dat->chunk_tsn, low)) {
                        /* this chunk is in a gap... */
                        dat->gap_reports++;
                        event_logiii(VVERBOSE,
                                     "Chunk in a gap: before(%u,%u)==true -- Marking it up (%u Gap Reports)!",
                                     dat->chunk_tsn, low, dat->gap_reports);
                        if (dat->gap_reports >= 4) {
                            /* FIXME : Get MTU of address, where RTX is to take place, instead of MAX_SCTP_PDU */
                            event_logi(VVERBOSE, "Got four gap_reports, ==checking== chunk %u for rtx OR drop", dat->chunk_tsn);
                            /* check sum of chunk sizes (whether it exceeds the MTU for the current address) */
                            if (dat->hasBeenDropped == FALSE) {
                                if (timerisset(&dat->expiry_time) && timercmp(&(rtx->sack_arrival_time), &(dat->expiry_time), >)) {
                                    event_logi(VVERBOSE, "Got four gap_reports, dropping chunk %u !!!", dat->chunk_tsn);
                                    dat->hasBeenDropped = TRUE;
                                    /* this is a trick... */
                                    dat->hasBeenFastRetransmitted = TRUE;
                                } else if (dat->hasBeenFastRetransmitted == FALSE) {
                                    event_logi(VVERBOSE, "Got four gap_reports, scheduling %u for RTX", dat->chunk_tsn);
                                    /* retransmit it, chunk is not yet expired */
                                    rtx_necessary = TRUE;
                                    rtx_chunks[chunks_to_rtx] = dat;
                                    dat->gap_reports = 0;
                                    dat->hasBeenFastRetransmitted = TRUE;
                                    chunks_to_rtx++;
                                    /* preparation for what is in section 6.2.1.C */
                                    retransmitted_bytes += dat->chunk_len;
                                }
                            } /*  if (dat->hasBeenDropped == FALSE)  */
                        }     /*  if (dat->gap_reports >= 4) */
                        /* read next chunk */
                        i++;
                        dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, i);
                        if (dat == NULL)
                            break; /* was the last chunk in the list */
                        if (chunks_to_rtx == MAX_NUM_OF_CHUNKS)
                            break;
                        else continue;

                    } else if (between(low, dat->chunk_tsn, hi)) {
                        event_logiii(VVERBOSE, "between(%u,%u,%u)==true", low, dat->chunk_tsn, hi);
                        if (dat->hasBeenAcked == FALSE && dat->hasBeenDropped == FALSE) {
                            rtx->newly_acked_bytes += dat->chunk_len;
                            dat->hasBeenAcked = TRUE;
                            rtx->all_chunks_are_unacked = FALSE;
                            dat->gap_reports = 0;
                            if (dat->num_of_transmissions == 1 && adr_index == dat->last_destination) {
                                rtx->saved_send_time = dat->transmission_time;
                                rtx->save_num_of_txm = 1;
                                event_logiii(VERBOSE, "Saving Time (chunk in gap) : %lu secs, %06lu usecs for tsn=%u",
                                                     dat->transmission_time.tv_sec,
                                                     dat->transmission_time.tv_usec, dat->chunk_tsn);

                            }
                        }

                        if (dat->num_of_transmissions < 1) {
                            error_log(ERROR_FATAL, "Somehow dat->num_of_transmissions is less than 1 !");
                            break;
                        }
                        /* reset number of gap reports so it does not get fast retransmitted */
                        dat->gap_reports = 0;

                        i++;
                        dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, i);
                        if (dat == NULL)
                            break; /* was the last chunk in the list or chunk is empty */
                        else continue;
                    } else if (after(dat->chunk_tsn, hi)) {
                        error_log(ERROR_MINOR, "Problem with fragment boundaries (low_2 <= hi_1)");
                        break;
                    }

                } while (pos < gap_len);

            }  /* end: if (rtx->chunk_list != NULL && dat != NULL) */
            else {
                event_log(EXTERNAL_EVENT,
                          "Received duplicated SACK for Chunks that are not in the queue anymore");
            }
        }

    } else {                    /* no gaps reported in this SACK */
        /* do nothing */
        if (rtx->all_chunks_are_unacked == FALSE) {
            /* okay, we have chunks in the queue that were acked by a gap report before       */
            /* and reneged: reset their status to unacked, since that is what peer reported   */
            /* fast retransmit reneged chunks, as per section 6.2.1.D.iii) of RFC 4960        */
            event_log(VVERBOSE, "rtx_process_sack: resetting all *hasBeenAcked* attributes");
            tmp_list = g_list_first(rtx->chunk_list);
            while (tmp_list) {
                dat = (chunk_data*)g_list_nth_data(tmp_list, 0);
                if (!dat) break;
                if (dat->hasBeenAcked == TRUE && dat->hasBeenDropped == FALSE) {
                    dat->hasBeenAcked = FALSE;
                    rtx_necessary = TRUE;
                    rtx_chunks[chunks_to_rtx] = dat;
                    dat->gap_reports = 0;
                    dat->hasBeenFastRetransmitted = TRUE;
                    event_logi(VVERBOSE, "rtx_process_sack: RENEG --> fast retransmitting chunk tsn %u ", dat->chunk_tsn);
                    chunks_to_rtx++;
                    /* preparation for what is in section 6.2.1.C */
                    retransmitted_bytes += dat->chunk_len;
                }
                tmp_list = g_list_next(tmp_list);
            }
            rtx->all_chunks_are_unacked = TRUE;
        }
    }

    event_log(INTERNAL_EVENT_0, "Marking of Chunks done in rtx_process_sack()");
    chunk_list_debug(VVERBOSE, rtx->chunk_list);

    /* also tell pathmanagement, that we got a SACK, possibly updating RTT/RTO. */
    rtx_rtt_update(adr_index, rtx);

    /*
     * new_acked==TRUE means our own ctsna has advanced :
     * also see section 6.2.1 (Note)
     */
    if (rtx->chunk_list == NULL) {
        if (rtx->highest_tsn == rtx->highest_acked) {
            all_acked = TRUE;
        }
        /* section 6.2.1.D.ii) */
        rtx->peer_arwnd = advertised_rwnd;
        rtx->lowest_tsn = rtx->highest_tsn;
        if (after(rtx->lowest_tsn, old_own_ctsna)) new_acked = TRUE;

        /* in the case where shutdown was requested by the ULP, and all is acked (i.e. ALL queues are empty) ! */
        if (rtx->shutdown_received == TRUE) {
            if (fc_readNumberOfQueuedChunks() == 0) {
                sci_allChunksAcked();
            }
        }
    } else {
        /* there are still chunks in that queue */
        if (rtx->chunk_list != NULL)
            dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, 0);
        if (dat == NULL) {
            error_log(ERROR_FATAL, "Problem with RTX-chunklist, CHECK Program and List Handling");
            return -1;
        }
        rtx->lowest_tsn = dat->chunk_tsn;
        /* new_acked is true, when our own ctsna advances... */
        if (after(rtx->lowest_tsn, old_own_ctsna)) new_acked = TRUE;
    }

    if (rtx->shutdown_received == TRUE) rxc_send_sack_everytime();

    event_logiiii(VVERBOSE,
                  "rtx->lowest_tsn==%u, new_acked==%s, all_acked==%s, rtx_necessary==%s\n",
                  rtx->lowest_tsn, ((new_acked == TRUE) ? "TRUE" : "FALSE"),
                  ((all_acked == TRUE) ? "TRUE" : "FALSE"),
                  ((rtx_necessary == TRUE) ? "TRUE" : "FALSE"));

    if (rtx_necessary == FALSE) {
        fc_sack_info(adr_index, advertised_rwnd, ctsna, all_acked, new_acked,
                     rtx->newly_acked_bytes, rtx->num_of_addresses);
        rtx_reset_bytecounters(rtx);
    } else {
        /* retval = */
        fc_fast_retransmission(adr_index, advertised_rwnd, ctsna,
                               retransmitted_bytes,
                               all_acked, new_acked,
                               rtx->newly_acked_bytes,
                               rtx->num_of_addresses,
                               chunks_to_rtx, rtx_chunks);
        rtx_reset_bytecounters(rtx);
    }

    if (before(rtx->advancedPeerAckPoint, ctsna)) {
        rtx->advancedPeerAckPoint = ctsna;
    }

    event_logiii(VERBOSE, "FORWARD_TSN check: ctsna: %u, advPeerAckPoint %u, rtx->lowest: %u",
        ctsna, rtx->advancedPeerAckPoint, rtx->lowest_tsn);
    /*-----------------------------------------------------------------------------*/
    if (mdi_supportsPRSCTP() == TRUE) {
        rtx_advancePeerAckPoint(rtx);
        if (after(rtx->advancedPeerAckPoint, ctsna)) {
            result = rtx_send_forward_tsn(rtx, rtx->advancedPeerAckPoint, adr_index, TRUE);
            event_logi(INTERNAL_EVENT_0, "rtx_process_sack: sent FORWARD_TSN, result : %d", result);
        }
    }
    /*-----------------------------------------------------------------------------*/

    return 0;
}


/**
 * Called from flow control to trigger retransmission of chunks that have previously
 * been sent to the address that timed out.
 * It is only called from flow control, so the association should be set correctly here.
 * @param  assoc_id     pointer to the id value of the association where the timeout occurred
 * @param  address      address that timed out
 * @param   mtu         current path mtu (this needs to be fixed, too !)
 * @param   chunks      pointer to an array that will contain pointers to the chunks that need to
 *                      be retransmitted after this function returns. Provide space !
 * @return  -1 on error, 0 for empty list, else number of chunks that can be retransmitted
 */
int rtx_t3_timeout(void *assoc_id, unsigned int address, unsigned int mtu, chunk_data ** chunks)
{
    rtx_buffer *rtx;
    /* assume a SACK with 5 fragments and 5 duplicates :-) */
    /* its size == 20+5*4+5*4 == 60        */
    unsigned int size = 60;
    int chunks_to_rtx = 0, result = 0;
    struct timeval now;
    GList *tmp;
    chunk_data *dat = NULL;
    event_logi(INTERNAL_EVENT_0, "========================= rtx_t3_timeout (address==%u) =====================", address);

    rtx = (rtx_buffer *) mdi_readReliableTransfer();

    if (rtx->chunk_list == NULL) return 0;

    adl_gettime(&now);

    tmp = g_list_first(rtx->chunk_list);

    while (tmp) {
        if (((chunk_data *)(tmp->data))->num_of_transmissions < 1) {
            error_log(ERROR_FATAL, "Somehow chunk->num_of_transmissions is less than 1 !");
            break;
        }
        /* only take chunks that were transmitted to *address* */
        if (((chunk_data *)(tmp->data))->last_destination == address) {
            if (((chunk_data *)(tmp->data))->hasBeenDropped == FALSE) {
                if (timerisset( &((chunk_data *)(tmp->data))->expiry_time)) {
                    if (timercmp(&now, &((chunk_data *)(tmp->data))->expiry_time, > )) {
                        /* chunk has expired, maybe send FORWARD_TSN */
                        ((chunk_data *)(tmp->data))->hasBeenDropped = TRUE;
                    } else { /* chunk has not yet expired */
                        chunks[chunks_to_rtx] = (chunk_data*)tmp->data;
                        size += chunks[chunks_to_rtx]->chunk_len;
                        event_logii(VVERBOSE, "Scheduling chunk (tsn==%u), len==%u for rtx",
                                    chunks[chunks_to_rtx]->chunk_tsn, chunks[chunks_to_rtx]->chunk_len);
                        /* change SCI2002 */
                        chunks[chunks_to_rtx]->gap_reports = 0;
                        chunks_to_rtx++;
                    }
                } else {
                    chunks[chunks_to_rtx] = (chunk_data*)tmp->data;
                    size += chunks[chunks_to_rtx]->chunk_len;
                    event_logii(VVERBOSE, "Scheduling chunk (tsn==%u), len==%u for rtx",
                            chunks[chunks_to_rtx]->chunk_tsn, chunks[chunks_to_rtx]->chunk_len);
                    /* change SCI2002 */
                    chunks[chunks_to_rtx]->gap_reports = 0;

                    chunks_to_rtx++;
                }
            }       /* hasBeenDropped == FALSE     */
        }           /* last_destination == address */
        tmp = g_list_next(tmp);
    }
    event_logi(VVERBOSE, "Scheduled %d chunks for rtx", chunks_to_rtx);

    if (rtx->chunk_list != NULL) {
        dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, 0);
        if (dat == NULL) {
           error_log(ERROR_FATAL, "Problem with RTX-chunklist, CHECK Program and List Handling");
           return chunks_to_rtx;
        }
        rtx->lowest_tsn = dat->chunk_tsn;
    } else {
        rtx->lowest_tsn = rtx->highest_tsn;
    }


    if (mdi_supportsPRSCTP() == TRUE) {
        /* properly advance rtx->advancedPeerAckPoint. If it is larger than the last ctsna, send FW-TSN */
        rtx_advancePeerAckPoint(rtx);
        if (after(rtx->advancedPeerAckPoint, rtx->lastReceivedCTSNA)) {
            result = rtx_send_forward_tsn(rtx, rtx->advancedPeerAckPoint, address, TRUE);
            event_logi(INTERNAL_EVENT_0, "rtx_t3_timeout: sent FORWARD_TSN, result : %d", result);
        }
    }

    return chunks_to_rtx;
}
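
/*
 * Illustrative caller sketch (not part of the original code): how flow control
 * might use rtx_t3_timeout(). The caller provides the pointer array itself;
 * the names example_handle_t3_timeout, assoc and pmtu are hypothetical.
 */
#if 0
static void example_handle_t3_timeout(void *assoc, unsigned int address, unsigned int pmtu)
{
    chunk_data *to_retransmit[MAX_NUM_OF_CHUNKS];
    int i, n;

    n = rtx_t3_timeout(assoc, address, pmtu, to_retransmit);
    for (i = 0; i < n; i++) {
        /* hand each chunk back to flow control / bundling for retransmission */
    }
}
#endif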


/**
 * a function called by FlowCtrl, when chunks have been given to the bundling
 * instance, but need to be kept in the buffer until acknowledged
 * @return 0 if OK, -1 if there is an error (list error)
 */
int rtx_save_retrans_chunks(void *data_chunk)
{
    chunk_data *dat;
    rtx_buffer *rtx;

    event_log(INTERNAL_EVENT_0, "rtx_save_retrans_chunks");

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (-1);
    }

    /*      chunk_list_debug(rtx->chunk_list); */

    dat = (chunk_data *) data_chunk;

    /* TODO : check, if all values are set correctly */
    dat->gap_reports = 0L;
    rtx->chunk_list = g_list_insert_sorted(rtx->chunk_list, dat, (GCompareFunc)sort_tsn);

    if (after(dat->chunk_tsn, rtx->highest_tsn))
        rtx->highest_tsn = dat->chunk_tsn;
    else
        error_log(ERROR_MINOR, "Data Chunk has TSN that was already assigned (i.e. is too small)");

    chunk_list_debug(VVERBOSE, rtx->chunk_list);

    rtx->num_of_chunks = g_list_length(rtx->chunk_list);
    return 0;
}

/**
 * output debug messages for the list of saved chunks
 * @param   event_log_level  INTERNAL_EVENT_0 INTERNAL_EVENT_1 EXTERNAL_EVENT_X EXTERNAL_EVENT
 * @param   chunk_list  the list about which we print information
 */
void chunk_list_debug(short event_log_level, GList * chunk_list)
{
    chunk_data *dat;
    unsigned int size, counter;
    unsigned int last_tsn;
    guint i=0;

    last_tsn = 0;
    if (event_log_level <= Current_event_log_) {
/*    if (1) { */
        event_log(event_log_level, "------------- Chunk List Debug ------------------------");
        if ((size = g_list_length(chunk_list)) == 0) {
            event_log(event_log_level, " Size of List == 0 ! ");
        } else if (size <= 200) {
            event_logi(event_log_level, " Size of List == %u ! Printing first 10 chunks....", size);
            dat = (chunk_data*)g_list_nth_data(chunk_list, 0);
            last_tsn = dat->chunk_tsn - 1;
            if (size > 10) counter = 10;
            else counter = size;
            for (i=0; i<counter; i++) {
                dat = (chunk_data*)g_list_nth_data(chunk_list, i);
                event_logii(event_log_level,
                            "________________ Chunk _________________\nChunk Size %u  -- TSN : %u  ",
                            dat->chunk_len, dat->chunk_tsn);
                event_logiii(event_log_level, "Gap repts=%u -- initial dest=%d  Transmissions = %u",
                              dat->gap_reports, dat->initial_destination, dat->num_of_transmissions);
                event_logii(event_log_level,  "Transmission Time : %lu secs, %06lu usecs",
                            dat->transmission_time.tv_sec, dat->transmission_time.tv_usec);
                event_logii(event_log_level, "Destination[%u] == %u", dat->num_of_transmissions,
                            dat->last_destination);

                if (dat->chunk_len > 10000)
                    error_log(ERROR_FATAL, "Corrupt TSN length in queue 1 ! Terminate");

                if (! after(dat->chunk_tsn, last_tsn)) error_log(ERROR_FATAL, "TSN not in sequence ! Bye");
                last_tsn = dat->chunk_tsn;
            }
            for (i=counter; i<size; i++) {
                dat = (chunk_data*)g_list_nth_data(chunk_list, i);
                if (! after(dat->chunk_tsn, last_tsn))
                    error_log(ERROR_FATAL, "Higher TSNs not in sequence ! Terminate");
                if (dat->chunk_tsn - last_tsn > 10000)
                    error_log(ERROR_FATAL, "Corrupt TSN in queue ! Terminate");
                if (dat->chunk_len > 10000)
                    error_log(ERROR_FATAL, "Corrupt TSN length in queue 2 ! Terminate");

                last_tsn = dat->chunk_tsn;
            }
            event_log(event_log_level, "------------- Chunk List Debug : DONE  ------------------------");
        } else {
            event_logi(event_log_level, " Size of List == %u ! ", size);
        }
    }
}


/**
 * Function that returns the highest consecutive TSN that has been acked by the peer
 * (i.e. our current cumulative TSN ack point).
 * @return the ctsna value
 */
unsigned int rtx_readLocalTSNacked()
{
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        event_log(INTERNAL_EVENT_0, "rtx_buffer instance not set !");
        return (0);
    }
    return rtx->lowest_tsn;
}

gboolean rtx_is_lowest_tsn(unsigned int atsn)
{
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        event_log(INTERNAL_EVENT_0, "rtx_buffer instance not set !");
        return (0);
    }
    return rtx->lowest_tsn == atsn;
}


/**
 * Called when a cookie that indicates the peer's restart is received in the
 * ESTABLISHED state -> we need to restart, too.
 */
void* rtx_restart_reliable_transfer(void* rtx_instance, unsigned int numOfPaths, unsigned int iTSN)
{
    void * new_rtx = NULL;
    /* ******************************************************************* */
    /* IMPLEMENTATION NOTE: It is an implementation decision on how
       to handle any pending datagrams. The implementation may elect
       to either A) send all messages back to its upper layer with the
       restart report, or B) automatically re-queue any datagrams
       pending by marking all of them as never-sent and assigning
       new TSN's at the time of their initial transmissions based upon
       the updated starting TSN (as defined in section 5).
       Version 13 says : SCTP data chunks MAY be retained !
       (this is implementation specific)
       ******************************************************************** */
    if (!rtx_instance) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return NULL;
    }
    event_logi(INTERNAL_EVENT_0, "Restarting Reliable Transfer with %u Paths", numOfPaths);

    rtx_delete_reltransfer(rtx_instance);
    /* For ease of implementation we will delete all old data ! */
    /* chunk_list_debug(VVERBOSE, rtx->chunk_list); */
    new_rtx = rtx_new_reltransfer(numOfPaths, iTSN);

    return new_rtx;
}

/**
 * Dequeues the oldest chunk in the retransmission list that has already been
 * transmitted at least once, copies its user data and attributes into the
 * provided buffers, and removes it from this module and from flow control.
 * @return number of chunks left in the queue, or a negative error code
 */
int rtx_dequeueOldestUnackedChunk(unsigned char *buf, unsigned int *len, unsigned int *tsn,
                                  unsigned short *sID, unsigned short *sSN, unsigned int* pID,
                                  unsigned char* flags, gpointer* ctx)
{
    int listlen, result;
    rtx_buffer *rtx;
    chunk_data *dat = NULL;
    SCTP_data_chunk* dchunk;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return SCTP_MODULE_NOT_FOUND;
    }
    if (rtx->chunk_list == NULL) return SCTP_UNSPECIFIED_ERROR;
    listlen = g_list_length(rtx->chunk_list);
    if (listlen <= 0) return SCTP_UNSPECIFIED_ERROR;
    dat = (chunk_data*)g_list_nth_data(rtx->chunk_list, 0);
    if (dat->num_of_transmissions == 0) return SCTP_UNSPECIFIED_ERROR;
    if ((*len) < (dat->chunk_len - FIXED_DATA_CHUNK_SIZE)) return SCTP_BUFFER_TOO_SMALL;

    dchunk = (SCTP_data_chunk*) dat->data;
    *len = dat->chunk_len - FIXED_DATA_CHUNK_SIZE;
    memcpy(buf, dchunk->data, dat->chunk_len - FIXED_DATA_CHUNK_SIZE);
    *tsn = dat->chunk_tsn;
    *sID = ntohs(dchunk->stream_id);
    *sSN = ntohs(dchunk->stream_sn);
    *pID = dchunk->protocolId;
    *flags = dchunk->chunk_flags;
    *ctx = dat->context;
    event_logiii(VERBOSE, "rtx_dequeueOldestUnackedChunk() returns chunk tsn %u, num-trans: %u, chunks left: %u",
            dat->chunk_tsn, dat->num_of_transmissions, listlen-1);

    result = fc_dequeueUnackedChunk(dat->chunk_tsn);
    event_logi(VERBOSE, "fc_dequeueUnackedChunk() returns  %u", result);
    rtx->chunk_list = g_list_remove(rtx->chunk_list, (gpointer) dat);
    /* be careful ! data may only be freed once: this module ONLY takes care of unacked chunks */
    chunk_list_debug(VVERBOSE, rtx->chunk_list);

    free(dat);
    return (listlen-1);
}
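
/*
 * Hypothetical caller sketch (illustration only, not part of the original
 * code): retrieving the oldest unacked user data. The buffer size and all
 * local names used here are assumptions.
 */
#if 0
static void example_dequeue_oldest_unacked(void)
{
    unsigned char  payload[2048];
    unsigned int   len = sizeof(payload), tsn, ppid;
    unsigned short sid, ssn;
    unsigned char  flags;
    gpointer       ctx;
    int chunks_left;

    chunks_left = rtx_dequeueOldestUnackedChunk(payload, &len, &tsn, &sid, &ssn,
                                                &ppid, &flags, &ctx);
    if (chunks_left >= 0) {
        /* 'len' now holds the length of the copied user data; 'chunks_left'
           is the number of chunks still waiting in the retransmission queue */
    }
}
#endif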


/**
 * Function returns the number of chunks that are waiting in the queue to be acked
 * @return size of the retransmission queue
 */
unsigned int rtx_readNumberOfUnackedChunks()
{
    unsigned int queue_len;
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return 0;
    }
    queue_len = g_list_length(rtx->chunk_list);
    event_logi(VERBOSE, "rtx_readNumberOfUnackedChunks() returns %u", queue_len);
    return queue_len;
}


/**
 * function to return the last a_rwnd value we got from our peer
 * @return  the peer's advertised receiver window
 */
unsigned int rtx_read_remote_receiver_window()
{
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return 0;
    }
    event_logi(VERBOSE, "rtx_read_remote_receiver_window returns %u", rtx->peer_arwnd);
    return rtx->peer_arwnd;
}


/**
 * function to set the a_rwnd value when we got it from our peer
 * @param  new_arwnd      the peer's newly advertised receiver window
 * @return  0 for success, -1 for error
 */
int rtx_set_remote_receiver_window(unsigned int new_arwnd)
{
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return -1;
    }
    event_logi(VERBOSE, "rtx_set_remote_receiver_window(%u)", new_arwnd);
    rtx->peer_arwnd = new_arwnd;
    return 0;
}

/**
 * function that is called by SCTP-Control, when ULP requests
 * shutdown in an established association
 * @return  0 for success, -1 for error
 */
int rtx_shutdown()
{
    rtx_buffer *rtx;

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return -1;
    }
    event_log(VERBOSE, "rtx_shutdown() activated");
    rtx->shutdown_received = TRUE;
    event_log(VERBOSE, "calling fc_shutdown()");
    fc_shutdown();
    return 0;
}


/*
   CHECKME : Check retransmission procedures for the case when SHUTDOWN is initiated.
 */


/**
 * function that is called by SCTP-Control, when the peer indicates
 * shutdown and sends us its last ctsna... this function dequeues
 * all chunks and returns the number of chunks left in the queue
 * @param  ctsna    up to this tsn we can dequeue all chunks here
 * @return  number of chunks that are still queued
 */
unsigned int rtx_rcv_shutdown_ctsna(unsigned int ctsna)
{
    rtx_buffer *rtx;
    int result;
    int rtx_queue_len = 0;
    gboolean all_acked = FALSE, new_acked = FALSE;

    event_logi(INTERNAL_EVENT_0, "rtx_rcv_shutdown_ctsna(ctsna==%u)", ctsna);

    rtx = (rtx_buffer *) mdi_readReliableTransfer();
    if (!rtx) {
        error_log(ERROR_MAJOR, "rtx_buffer instance not set !");
        return (0);
    }
    rxc_send_sack_everytime();

    if (after(ctsna, rtx->lowest_tsn) || (ctsna == rtx->lowest_tsn)) {
        event_logiii(VVERBOSE, "after(%u, %u) == true, call rtx_dequeue_up_to(%u)",
                     ctsna, rtx->lowest_tsn, ctsna);
        result = rtx_dequeue_up_to(ctsna, 0);
        if (result < 0) {
            event_log(VVERBOSE, "Bad ctsna arrived in shutdown or no chunks in queue");
        }
        rtx->lowest_tsn = ctsna;
        event_logi(VVERBOSE, "Updated rtx->lowest_tsn==ctsna==%u", ctsna);
        rtx_queue_len = g_list_length(rtx->chunk_list);

        if (rtx->newly_acked_bytes != 0) new_acked = TRUE;
        if (rtx_queue_len == 0) all_acked = TRUE;
        fc_sack_info(0, rtx->peer_arwnd, ctsna, (boolean)all_acked, (boolean)new_acked,
                     rtx->newly_acked_bytes, rtx->num_of_addresses);
        rtx_reset_bytecounters(rtx);
    } else {
        rtx_queue_len = g_list_length(rtx->chunk_list);
    }


    if (rtx->shutdown_received == TRUE) {
        if (fc_readNumberOfQueuedChunks() == 0 && rtx_queue_len == 0) {
            sci_allChunksAcked();
        }
    }
    return (rtx_queue_len);
}