1 /*
2 * Copyright (c) 2021 Fastly, Kazuho Oku
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 * IN THE SOFTWARE.
21 */
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>
#include <getopt.h>
#include <netinet/in.h>
#include <openssl/bio.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <openssl/pem.h>
#include "picotls.h"
#include "picotls/openssl.h"
#include "quicly.h"
#include "quicly/cc.h"
#include "quicly/defaults.h"
37
/* trace destination consumed by quicly; NULL disables tracing, set to stdout by the -t option */
FILE *quicly_trace_fp;

/* current simulation time, in seconds; starts at a non-zero epoch so derived timestamps are positive */
static double now = 1000;
41
new_address(void)42 static quicly_address_t new_address(void)
43 {
44 static uint32_t next_ipaddr = 1;
45 quicly_address_t addr = {};
46 addr.sin.sin_family = AF_INET;
47 addr.sin.sin_addr.s_addr = htonl(next_ipaddr);
48 addr.sin.sin_port = htons(54321);
49 ++next_ipaddr;
50 return addr;
51 }
52
53 struct net_endpoint;
54
55 /**
56 * Packet
57 */
struct net_packet {
    /**
     * used by nodes to maintain the linked-list of packets being queued
     */
    struct net_packet *next;
    /**
     * source
     */
    struct net_endpoint *src;
    /**
     * destination
     */
    quicly_address_t dest;
    /**
     * used by queues to retain when the packet entered that queue
     */
    double enter_at;
    /**
     * size of the packet
     */
    size_t size;
    /**
     * the packet; the struct is over-allocated so that `size` bytes fit here (see net_packet_create)
     */
    uint8_t bytes[1];
};
84
/**
 * FIFO of packets; `size` holds the sum of the queued packets' sizes, in bytes
 */
struct net_queue {
    struct net_packet *first, **append_at;
    size_t size;
};
89
/**
 * Base "class" of a simulated network element. Packets are pushed into a node via `forward_`; time-driven work is done in
 * `run`, invoked once the simulation clock reaches the value returned by `next_run_at`.
 */
struct net_node {
    void (*forward_)(struct net_node *node, struct net_packet *packet);
    double (*next_run_at)(struct net_node *node);
    void (*run)(struct net_node *node);
};
95
/**
 * A node that delivers each packet to `next_node` after a fixed delay (seconds)
 */
struct net_delay {
    struct net_node super;
    struct net_node *next_node;
    struct net_queue queue;
    double delay;
};
102
/**
 * A node that drops packets with probability `loss_ratio` and forwards the rest to `next_node`
 */
struct net_random_loss {
    struct net_node super;
    struct net_node *next_node;
    double loss_ratio;
};
108
/**
 * A node that models a fixed-rate link with a drop-tail queue of `capacity` bytes
 */
struct net_bottleneck {
    struct net_node super;
    struct net_node *next_node;
    struct net_queue queue;
    double next_emit_at;   /* earliest time the next packet may leave the link */
    double bytes_per_sec;  /* link bandwidth */
    size_t capacity;       /* maximum total bytes queued before packets are dropped */
};
117
/**
 * A QUIC endpoint. The `conns` array is scanned up to the first entry with a NULL `quic`, i.e., it acts as a
 * NULL-terminated list; `accept_ctx` being non-NULL makes the endpoint accept incoming connections (server role).
 */
struct net_endpoint {
    struct net_node super;
    quicly_address_t addr;
    double start_at; /* simulation time at which this endpoint becomes active */
    struct net_endpoint_conn {
        quicly_conn_t *quic;
        struct net_node *egress; /* node to which datagrams generated by this connection are pushed */
    } conns[10];
    quicly_context_t *accept_ctx;
};
128
/**
 * Creates a packet carrying a copy of `vec`, traveling from `src` to `dest`. Ownership of the returned packet passes to the
 * caller, who eventually releases it with `net_packet_destroy`.
 */
static struct net_packet *net_packet_create(struct net_endpoint *src, quicly_address_t *dest, ptls_iovec_t vec)
{
    struct net_packet *p = malloc(offsetof(struct net_packet, bytes) + vec.len);

    assert(p != NULL && "out of memory"); /* fixed: the original dereferenced an unchecked malloc result */
    p->next = NULL;
    p->src = src;
    p->dest = *dest;
    p->enter_at = now;
    p->size = vec.len;
    memcpy(p->bytes, vec.base, vec.len);

    return p;
}
142
/**
 * Releases a packet obtained from `net_packet_create`.
 */
static void net_packet_destroy(struct net_packet *packet)
{
    free(packet);
}
147
net_queue_enqueue(struct net_queue * self,struct net_packet * packet)148 static void net_queue_enqueue(struct net_queue *self, struct net_packet *packet)
149 {
150 packet->next = NULL;
151 packet->enter_at = now;
152 *self->append_at = packet;
153 self->append_at = &packet->next;
154 self->size += packet->size;
155 }
156
net_queue_dequeue(struct net_queue * self)157 static struct net_packet *net_queue_dequeue(struct net_queue *self)
158 {
159 struct net_packet *packet = self->first;
160 assert(packet != NULL);
161 if ((self->first = packet->next) == NULL)
162 self->append_at = &self->first;
163 self->size -= packet->size;
164 return packet;
165 }
166
net_delay_forward(struct net_node * _self,struct net_packet * packet)167 static void net_delay_forward(struct net_node *_self, struct net_packet *packet)
168 {
169 struct net_delay *self = (struct net_delay *)_self;
170 net_queue_enqueue(&self->queue, packet);
171 }
172
net_delay_next_run_at(struct net_node * _self)173 static double net_delay_next_run_at(struct net_node *_self)
174 {
175 struct net_delay *self = (struct net_delay *)_self;
176 return self->queue.first != NULL ? self->queue.first->enter_at + self->delay : INFINITY;
177 }
178
net_delay_run(struct net_node * _self)179 static void net_delay_run(struct net_node *_self)
180 {
181 struct net_delay *self = (struct net_delay *)_self;
182
183 while (self->queue.first != NULL && self->queue.first->enter_at + self->delay <= now) {
184 struct net_packet *packet = net_queue_dequeue(&self->queue);
185 self->next_node->forward_(self->next_node, packet);
186 }
187 }
188
net_delay_init(struct net_delay * self,double delay)189 static void net_delay_init(struct net_delay *self, double delay)
190 {
191 *self = (struct net_delay){
192 .super = {net_delay_forward, net_delay_next_run_at, net_delay_run},
193 .queue = {.append_at = &self->queue.first},
194 .delay = delay,
195 };
196 }
197
net_random_loss_forward(struct net_node * _self,struct net_packet * packet)198 static void net_random_loss_forward(struct net_node *_self, struct net_packet *packet)
199 {
200 struct net_random_loss *self = (struct net_random_loss *)_self;
201
202 if (rand() % 65536 < self->loss_ratio * 65536) {
203 printf("{\"random-loss\": \"drop\", \"at\": %f, \"packet-src\": %" PRIu32 "}\n", now,
204 ntohl(packet->src->addr.sin.sin_addr.s_addr));
205 net_packet_destroy(packet);
206 return;
207 }
208
209 self->next_node->forward_(self->next_node, packet);
210 }
211
/**
 * The random-loss node is stateless and purely reactive; it never needs a timer-driven wakeup.
 */
static double net_random_loss_next_run_at(struct net_node *self)
{
    return INFINITY;
}
216
net_random_loss_init(struct net_random_loss * self,double loss_ratio)217 static void net_random_loss_init(struct net_random_loss *self, double loss_ratio)
218 {
219 *self = (struct net_random_loss){
220 .super = {net_random_loss_forward, net_random_loss_next_run_at, NULL},
221 .loss_ratio = loss_ratio,
222 };
223 }
224
net_bottleneck_print_stats(struct net_bottleneck * self,const char * event,struct net_packet * packet)225 static void net_bottleneck_print_stats(struct net_bottleneck *self, const char *event, struct net_packet *packet)
226 {
227 printf("{\"bottleneck\": \"%s\", \"at\": %f, \"queue-size\": %zu, \"packet-src\": %" PRIu32 ", \"packet-size\": %zu}\n", event,
228 now, self->queue.size, ntohl(packet->src->addr.sin.sin_addr.s_addr), packet->size);
229 }
230
net_bottleneck_forward(struct net_node * _self,struct net_packet * packet)231 static void net_bottleneck_forward(struct net_node *_self, struct net_packet *packet)
232 {
233 struct net_bottleneck *self = (struct net_bottleneck *)_self;
234
235 /* drop the packet if the queue is full */
236 if (self->queue.size + packet->size > self->capacity) {
237 net_bottleneck_print_stats(self, "drop", packet);
238 net_packet_destroy(packet);
239 return;
240 }
241
242 net_bottleneck_print_stats(self, "enqueue", packet);
243 net_queue_enqueue(&self->queue, packet);
244 }
245
net_bottleneck_next_run_at(struct net_node * _self)246 static double net_bottleneck_next_run_at(struct net_node *_self)
247 {
248 struct net_bottleneck *self = (struct net_bottleneck *)_self;
249
250 if (self->queue.first == NULL)
251 return INFINITY;
252
253 double emit_at = self->queue.first->enter_at;
254 if (emit_at < self->next_emit_at)
255 emit_at = self->next_emit_at;
256
257 return emit_at;
258 }
259
net_bottleneck_run(struct net_node * _self)260 static void net_bottleneck_run(struct net_node *_self)
261 {
262 struct net_bottleneck *self = (struct net_bottleneck *)_self;
263
264 if (net_bottleneck_next_run_at(&self->super) > now)
265 return;
266
267 /* detach packet */
268 struct net_packet *packet = net_queue_dequeue(&self->queue);
269 net_bottleneck_print_stats(self, "dequeue", packet);
270
271 /* update next emission timer */
272 self->next_emit_at = now + (double)packet->size / self->bytes_per_sec;
273
274 /* forward to the next node */
275 self->next_node->forward_(self->next_node, packet);
276 }
277
net_bottleneck_init(struct net_bottleneck * self,double bytes_per_sec,double capacity_in_sec)278 static void net_bottleneck_init(struct net_bottleneck *self, double bytes_per_sec, double capacity_in_sec)
279 {
280 *self = (struct net_bottleneck){
281 .super = {net_bottleneck_forward, net_bottleneck_next_run_at, net_bottleneck_run},
282 .queue = {.append_at = &self->queue.first},
283 .bytes_per_sec = bytes_per_sec,
284 .capacity = (size_t)(bytes_per_sec * capacity_in_sec),
285 };
286 }
287
288 static quicly_cid_plaintext_t next_quic_cid;
289
/**
 * Receive path of an endpoint: decodes every QUIC packet contained in the incoming datagram and hands each to the owning
 * connection, accepting a new connection when acting as a server. The datagram is always released before returning.
 * NOTE(review): the connection scan relies on a NULL `quic` sentinel in `conns`; there is no bound check against the array
 * size of 10 — confirm the simulation never creates that many connections per endpoint.
 */
static void net_endpoint_forward(struct net_node *_self, struct net_packet *packet)
{
    struct net_endpoint *self = (struct net_endpoint *)_self;

    size_t off = 0;
    while (off != packet->size) {
        /* decode packet; the context is taken from the first connection if any, otherwise from the accept context */
        quicly_decoded_packet_t qp;
        if (quicly_decode_packet(self->conns[0].quic != NULL ? quicly_get_context(self->conns[0].quic) : self->accept_ctx, &qp,
                                 packet->bytes, packet->size, &off) == SIZE_MAX)
            break;
        /* find the matching connection, or where new state should be created */
        struct net_endpoint_conn *conn;
        for (conn = self->conns; conn->quic != NULL; ++conn)
            if (quicly_is_destination(conn->quic, &packet->dest.sa, &packet->src->addr.sa, &qp))
                break;
        /* let the existing connection handle the packet, or accept a new connection */
        if (conn->quic != NULL) {
            quicly_receive(conn->quic, &packet->dest.sa, &packet->src->addr.sa, &qp);
        } else {
            assert(self->accept_ctx != NULL && "a packet for which we do not have state must be a new connection request");
            if (quicly_accept(&conn->quic, self->accept_ctx, &packet->dest.sa, &packet->src->addr.sa, &qp, NULL, &next_quic_cid,
                              NULL) == 0) {
                assert(conn->quic != NULL);
                ++next_quic_cid.master_id;
                /* reply packets go straight back to the sender node */
                conn->egress = &packet->src->super;
            } else {
                assert(conn->quic == NULL);
            }
        }
    }

    net_packet_destroy(packet);
}
324
net_endpoint_next_run_at(struct net_node * _self)325 static double net_endpoint_next_run_at(struct net_node *_self)
326 {
327 struct net_endpoint *self = (struct net_endpoint *)_self;
328
329 if (now < self->start_at)
330 return self->start_at;
331
332 double at = INFINITY;
333 for (struct net_endpoint_conn *conn = self->conns; conn->quic != NULL; ++conn) {
334 /* value is incremented by 0.1ms to avoid the timer firing earlier than specified due to rounding error */
335 double conn_at = quicly_get_first_timeout(conn->quic) / 1000. + 0.0001;
336 if (conn_at < at)
337 at = conn_at;
338 }
339 if (at < now)
340 at = now;
341 return at;
342 }
343
net_endpoint_run(struct net_node * _self)344 static void net_endpoint_run(struct net_node *_self)
345 {
346 struct net_endpoint *self = (struct net_endpoint *)_self;
347
348 if (now < self->start_at)
349 return;
350
351 for (struct net_endpoint_conn *conn = self->conns; conn->quic != NULL; ++conn) {
352 quicly_address_t dest, src;
353 struct iovec datagrams[10];
354 size_t num_datagrams = PTLS_ELEMENTSOF(datagrams);
355 uint8_t buf[PTLS_ELEMENTSOF(datagrams) * 1500];
356 int ret;
357 if ((ret = quicly_send(conn->quic, &dest, &src, datagrams, &num_datagrams, buf, sizeof(buf))) == 0) {
358 for (size_t i = 0; i < num_datagrams; ++i) {
359 struct net_packet *packet =
360 net_packet_create(self, &dest, ptls_iovec_init(datagrams[i].iov_base, datagrams[i].iov_len));
361 conn->egress->forward_(conn->egress, packet);
362 }
363 } else {
364 assert(ret != QUICLY_ERROR_FREE_CONNECTION);
365 }
366 }
367 }
368
net_endpoint_init(struct net_endpoint * endpoint)369 static void net_endpoint_init(struct net_endpoint *endpoint)
370 {
371 *endpoint = (struct net_endpoint){
372 .super = {net_endpoint_forward, net_endpoint_next_run_at, net_endpoint_run},
373 .addr = new_address(),
374 };
375 }
376
run_nodes(struct net_node ** nodes)377 static void run_nodes(struct net_node **nodes)
378 {
379 double next_now = INFINITY;
380 for (struct net_node **node = nodes; *node != NULL; ++node) {
381 double at = (*node)->next_run_at(*node);
382 assert(at >= now);
383 if (next_now > at)
384 next_now = at;
385 }
386
387 if (isinf(next_now))
388 return;
389
390 now = next_now;
391 for (struct net_node **node = nodes; *node != NULL; ++node) {
392 if ((*node)->next_run_at(*node) <= now)
393 (*node)->run(*node);
394 }
395 }
396
/* clock callback for picotls: simulation time converted from seconds to milliseconds */
static uint64_t tls_now_cb(ptls_get_time_t *self)
{
    return (uint64_t)(now * 1000);
}
401
/* clock callback for quicly: simulation time converted from seconds to milliseconds */
static int64_t quic_now_cb(quicly_now_t *self)
{
    return (int64_t)(now * 1000);
}
406
/* stream callback: the simulator keeps no per-stream state, so there is nothing to release */
static void stream_destroy_cb(quicly_stream_t *stream, int err)
{
}
410
/* stream callback: sent data is synthesized on the fly (see stream_egress_emit_cb), so nothing needs to be shifted */
static void stream_egress_shift_cb(quicly_stream_t *stream, size_t delta)
{
}
414
/**
 * Egress callback used on the client side only: fills the output with 'A's and never signals end-of-stream, i.e., the
 * sender produces an endless flow of data limited solely by congestion / flow control.
 */
static void stream_egress_emit_cb(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all)
{
    assert(quicly_is_client(stream->conn));
    memset(dst, 'A', *len);
    *wrote_all = 0;
}
421
/* peers never send STOP_SENDING in this simulation; treat it as a logic error */
static void stream_on_stop_sending_cb(quicly_stream_t *stream, int err)
{
    assert(!"unexpected");
}
426
/**
 * Server-side receive callback: immediately consumes all contiguously received bytes without looking at them, so the
 * receive window keeps reopening and the sender is limited only by the network.
 */
static void stream_on_receive_cb(quicly_stream_t *stream, size_t off, const void *src, size_t len)
{
    assert(!quicly_is_client(stream->conn));
    assert(!quicly_recvstate_transfer_complete(&stream->recvstate));

    if (stream->recvstate.data_off < stream->recvstate.received.ranges[0].end)
        quicly_stream_sync_recvbuf(stream, stream->recvstate.received.ranges[0].end - stream->recvstate.data_off);
}
435
/* peers never reset streams in this simulation; treat it as a logic error */
static void stream_on_receive_reset_cb(quicly_stream_t *stream, int err)
{
    assert(!"unexpected");
}
440
/**
 * Called by quicly whenever a stream is opened; every stream shares the single, statically allocated callback table above.
 */
static int stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *stream)
{
    static const quicly_stream_callbacks_t stream_callbacks = {stream_destroy_cb, stream_egress_shift_cb,
                                                               stream_egress_emit_cb, stream_on_stop_sending_cb,
                                                               stream_on_receive_cb, stream_on_receive_reset_cb};
    stream->callbacks = &stream_callbacks;
    return 0;
}
449
/**
 * Prints command-line help for `cmd` to stdout.
 */
static void usage(const char *cmd)
{
    printf("Usage: %s ...\n"
           "\n"
           "Options:\n"
           " -n <cc> adds a sender using specified controller\n"
           " -b <bytes_per_sec> bottleneck bandwidth (default: 1000000, i.e., 1MB/s)\n"
           " -l <seconds> number of seconds to simulate (default: 100)\n"
           /* fixed: "botteneck" typo in the user-visible help text */
           " -d <delay> delay to be introduced between the sender and the bottleneck, in seconds (default: 0.1)\n"
           " -q <seconds> maximum depth of the bottleneck queue, in seconds (default: 0.1)\n"
           " -r <rate> introduce random loss at specified probability (default: 0)\n"
           " -s <seconds> delay until the sender is introduced to the simulation (default: 0)\n"
           " -t emits trace as well\n"
           " -h print this help\n"
           "\n",
           cmd);
}
467
/* PEM-encoded RSA private key for the simulated server; test-only material, not a real secret */
#define RSA_PRIVATE_KEY \
    "-----BEGIN RSA PRIVATE KEY-----\n" \
    "MIIEpAIBAAKCAQEA7zZheZ4ph98JaedBNv9kqsVA9CSmhd69kBc9ZAfVFMA4VQwp\n" \
    "rOj3ZGrxf20HB3FkvqGvew9ZogUF6NjbPumeiUObGpP21Y5wcYlPL4aojlrwMB/e\n" \
    "OxOCpuRyQTRSSe1hDPvdJABQdmshDP5ZSEBLdUSgrNn4KWhIDjFj1AHXIMqeqTXe\n" \
    "tFuRgNzHdtbXQx+UWBis2B6qZJuqSArb2msVOC8D5gNznPPlQw7FbdPCaLNXSb6G\n" \
    "nI0E0uj6QmYlAw9s6nkgP/zxjfFldqPNUprGcEqTwmAb8VVtd7XbANYrzubZ4Nn6\n" \
    "/WXrCrVxWUmh/7Spgdwa/I4Nr1JHv9HHyL2z/wIDAQABAoIBAEVPf2zKrAPnVwXt\n" \
    "cJLr6xIj908GM43EXS6b3TjXoCDUFT5nOMgV9GCPMAwY3hmE/IjTtlG0v+bXB8BQ\n" \
    "3S3caQgio5VO3A1CqUfsXhpKLRqaNM/s2+pIG+oZdRV5gIJVGnK1o3yj7qxxG/F0\n" \
    "3Q+3OWXwDZIn0eTFh2M9YkxygA/KtkREZWv8Q8qZpdOpJSBYZyGE97Jqy/yGc+DQ\n" \
    "Vpoa9B8WwnIdUn47TkZfsbzqGIYZxatJQDC1j7Y+F8So7zBbUhpz7YqATQwf5Efm\n" \
    "K2xwvlwfdwykq6ffEr2M/Xna0220G2JZlGq3Cs2X9GT9Pt9OS86Bz+EL46ELo0tZ\n" \
    "yfHQe/kCgYEA+zh4k2be6fhQG+ChiG3Ue5K/kH2prqyGBus61wHnt8XZavqBevEy\n" \
    "4pdmvJ6Q1Ta9Z2YCIqqNmlTdjZ6B35lvAK8YFITGy0MVV6K5NFYVfhALWCQC2r3B\n" \
    "6uH39FQ0mDo3gS5ZjYlUzbu67LGFnyX+pyMr2oxlhI1fCY3VchXQAOsCgYEA88Nt\n" \
    "CwSOaZ1fWmyNAgXEAX1Jx4XLFYgjcA/YBXW9gfQ0AfufB346y53PsgjX1lB+Bbcg\n" \
    "cY/o5W7F0b3A0R4K5LShlPCq8iB2DC+VnpKwTgo8ylh+VZCPy2BmMK0jrrmyqWeg\n" \
    "PzwgP0lp+7l/qW8LDImeYi8nWoqd6f1ye4iJdD0CgYEAlIApJljk5EFYeWIrmk3y\n" \
    "EKoKewsNRqfNAkICoh4KL2PQxaAW8emqPq9ol47T5nVZOMnf8UYINnZ8EL7l3psA\n" \
    "NtNJ1Lc4G+cnsooKGJnaUo6BZjTDSzJocsPoopE0Fdgz/zS60yOe8Y5LTKcTaaQ4\n" \
    "B+yOe74KNHSs/STOS4YBUskCgYAIqaRBZPsOo8oUs5DbRostpl8t2QJblIf13opF\n" \
    "v2ZprN0ASQngwUqjm8sav5e0BQ5Fc7mSb5POO36KMp0ckV2/vO+VFGxuyFqJmlNN\n" \
    "3Fapn1GDu1tZ/RYvGxDmn/CJsA26WXVnaeKXfStoB7KSueCBpI5dXOGgJRbxjtE3\n" \
    "tKV13QKBgQCtmLtTJPJ0Z+9n85C8kBonk2MCnD9JTYWoDQzNMYGabthzSqJqcEek\n" \
    "dvhr82XkcHM+r6+cirjdQr4Qj7/2bfZesHl5XLvoJDB1YJIXnNJOELwbktrJrXLc\n" \
    "dJ+MMvPvBAMah/tqr2DqgTGfWLDt9PJiCJVsuN2kD9toWHV08pY0Og==\n" \
    "-----END RSA PRIVATE KEY-----\n"
496
/* PEM-encoded certificate matching RSA_PRIVATE_KEY; served by the simulated server during the handshake */
#define RSA_CERTIFICATE \
    "-----BEGIN CERTIFICATE-----\n" \
    "MIIDOjCCAiKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtIMk8g\n" \
    "VGVzdCBDQTAeFw0xNDEyMTAxOTMzMDVaFw0yNDEyMDcxOTMzMDVaMBsxGTAXBgNV\n" \
    "BAMTEDEyNy4wLjAuMS54aXAuaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\n" \
    "AoIBAQDvNmF5nimH3wlp50E2/2SqxUD0JKaF3r2QFz1kB9UUwDhVDCms6PdkavF/\n" \
    "bQcHcWS+oa97D1miBQXo2Ns+6Z6JQ5sak/bVjnBxiU8vhqiOWvAwH947E4Km5HJB\n" \
    "NFJJ7WEM+90kAFB2ayEM/llIQEt1RKCs2fgpaEgOMWPUAdcgyp6pNd60W5GA3Md2\n" \
    "1tdDH5RYGKzYHqpkm6pICtvaaxU4LwPmA3Oc8+VDDsVt08Jos1dJvoacjQTS6PpC\n" \
    "ZiUDD2zqeSA//PGN8WV2o81SmsZwSpPCYBvxVW13tdsA1ivO5tng2fr9ZesKtXFZ\n" \
    "SaH/tKmB3Br8jg2vUke/0cfIvbP/AgMBAAGjgY0wgYowCQYDVR0TBAIwADAsBglg\n" \
    "hkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0O\n" \
    "BBYEFJXhddVQ68vtPvxoHWHsYkLnu3+4MDAGA1UdIwQpMCehGqQYMBYxFDASBgNV\n" \
    "BAMTC0gyTyBUZXN0IENBggkAmqS1V7DvzbYwDQYJKoZIhvcNAQELBQADggEBAJQ2\n" \
    "uvzL/lZnrsF4cvHhl/mg+s/RjHwvqFRrxOWUeWu2BQOGdd1Izqr8ZbF35pevPkXe\n" \
    "j3zQL4Nf8OxO/gx4w0165KL4dYxEW7EaxsDQUI2aXSW0JNSvK2UGugG4+E4aT+9y\n" \
    "cuBCtfWbL4/N6IMt2QW17B3DcigkreMoZavnnqRecQWkOx4nu0SmYg1g2QV4kRqT\n" \
    "nvLt29daSWjNhP3dkmLTxn19umx26/JH6rqcgokDfHHO8tlDbc9JfyxYH01ZP2Ps\n" \
    "esIiGa/LBXfKiPXxyHuNVQI+2cMmIWYf+Eu/1uNV3K55fA8806/FeklcQe/vvSCU\n" \
    "Vw6RN5S/14SQnMYWr7E=\n" \
    "-----END CERTIFICATE-----\n"
518
main(int argc,char ** argv)519 int main(int argc, char **argv)
520 {
521 ERR_load_CRYPTO_strings();
522 OpenSSL_add_all_algorithms();
523
524 ptls_iovec_t cert = {};
525 {
526 BIO *bio = BIO_new_mem_buf(RSA_CERTIFICATE, strlen(RSA_CERTIFICATE));
527 X509 *x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL);
528 assert(x509 != NULL || !!"failed to load certificate");
529 BIO_free(bio);
530 cert.len = i2d_X509(x509, &cert.base);
531 X509_free(x509);
532 }
533
534 ptls_openssl_sign_certificate_t cert_signer;
535 {
536 BIO *bio = BIO_new_mem_buf(RSA_PRIVATE_KEY, strlen(RSA_PRIVATE_KEY));
537 EVP_PKEY *pkey = PEM_read_bio_PrivateKey(bio, NULL, NULL, NULL);
538 assert(pkey != NULL || !"failed to load private key");
539 BIO_free(bio);
540 ptls_openssl_init_sign_certificate(&cert_signer, pkey);
541 EVP_PKEY_free(pkey);
542 }
543 ptls_get_time_t tls_now = {tls_now_cb};
544 ptls_context_t tlsctx = {.random_bytes = ptls_openssl_random_bytes,
545 .get_time = &tls_now,
546 .key_exchanges = ptls_openssl_key_exchanges,
547 .cipher_suites = ptls_openssl_cipher_suites,
548 .certificates = {&cert, 1},
549 .sign_certificate = &cert_signer.super};
550 quicly_amend_ptls_context(&tlsctx);
551
552 quicly_stream_open_t stream_open = {stream_open_cb};
553 quicly_now_t quic_now = {quic_now_cb};
554 quicly_context_t quicctx = quicly_spec_context;
555 quicctx.now = &quic_now;
556 quicctx.tls = &tlsctx;
557 quicctx.stream_open = &stream_open;
558 quicctx.transport_params.max_streams_uni = 10;
559 quicctx.transport_params.max_stream_data.uni = 128 * 1024 * 1024;
560 quicctx.transport_params.max_data = 128 * 1024 * 1824;
561 quicctx.transport_params.min_ack_delay_usec = UINT64_MAX; /* disable ack-delay extension */
562
563 struct net_bottleneck bottleneck_node;
564 struct net_random_loss random_loss_node;
565 struct {
566 struct net_endpoint node;
567 quicly_context_t accept_ctx;
568 } server_node;
569 struct net_node *nodes[20] = {}, **node_insert_at = nodes;
570
571 net_endpoint_init(&server_node.node);
572 server_node.accept_ctx = quicctx;
573 server_node.node.accept_ctx = &server_node.accept_ctx;
574 *node_insert_at++ = &server_node.node.super;
575
576 /* parse args */
577 double delay = 0.1, bw = 1e6, depth = 0.1, start = 0, random_loss = 0;
578 unsigned length = 100;
579 int ch;
580 while ((ch = getopt(argc, argv, "n:b:d:s:l:q:r:th")) != -1) {
581 switch (ch) {
582 case 'n': {
583 quicly_cc_type_t **cc;
584 for (cc = quicly_cc_all_types; *cc != NULL; ++cc)
585 if (strcmp((*cc)->name, optarg) == 0)
586 break;
587 if (*cc != NULL) {
588 quicctx.init_cc = (*cc)->cc_init;
589 } else {
590 fprintf(stderr, "unknown congestion controller: %s\n", optarg);
591 exit(1);
592 }
593 struct net_delay *delay_node = malloc(sizeof(*delay_node));
594 net_delay_init(delay_node, delay);
595 delay_node->next_node = &bottleneck_node.super;
596 *node_insert_at++ = &delay_node->super;
597 struct net_endpoint *client_node = malloc(sizeof(*client_node));
598 net_endpoint_init(client_node);
599 client_node->start_at = now + start;
600 int ret = quicly_connect(&client_node->conns[0].quic, &quicctx, "hello.example.com", &server_node.node.addr.sa,
601 &client_node->addr.sa, &next_quic_cid, ptls_iovec_init(NULL, 0), NULL, NULL);
602 ++next_quic_cid.master_id;
603 assert(ret == 0);
604 quicly_stream_t *stream;
605 ret = quicly_open_stream(client_node->conns[0].quic, &stream, 1);
606 assert(ret == 0);
607 ret = quicly_stream_sync_sendbuf(stream, 1);
608 assert(ret == 0);
609 client_node->conns[0].egress = &delay_node->super;
610 *node_insert_at++ = &client_node->super;
611 } break;
612 case 'b':
613 if (sscanf(optarg, "%lf", &bw) != 1) {
614 fprintf(stderr, "invalid bandwidth: %s\n", optarg);
615 exit(1);
616 }
617 break;
618 case 'd':
619 if (sscanf(optarg, "%lf", &delay) != 1) {
620 fprintf(stderr, "invalid delay value: %s\n", optarg);
621 exit(1);
622 }
623 break;
624 case 's':
625 if (sscanf(optarg, "%lf", &start) != 1) {
626 fprintf(stderr, "invaild start: %s\n", optarg);
627 exit(1);
628 }
629 break;
630 case 'l':
631 if (sscanf(optarg, "%u", &length) != 1) {
632 fprintf(stderr, "invalid length: %s\n", optarg);
633 exit(1);
634 }
635 break;
636 case 'q':
637 if (sscanf(optarg, "%lf", &depth) != 1) {
638 fprintf(stderr, "invalid queue depth: %s\n", optarg);
639 exit(1);
640 }
641 break;
642 case 'r':
643 if (sscanf(optarg, "%lf", &random_loss) != 1) {
644 fprintf(stderr, "invalid random loss rate: %s\n", optarg);
645 exit(1);
646 }
647 break;
648 case 't':
649 quicly_trace_fp = stdout;
650 break;
651 default:
652 usage(argv[0]);
653 exit(0);
654 }
655 }
656 argc -= optind;
657 argv += optind;
658
659 /* setup bottleneck */
660 net_bottleneck_init(&bottleneck_node, bw, depth);
661 bottleneck_node.next_node = &server_node.node.super;
662 *node_insert_at++ = &bottleneck_node.super;
663
664 /* setup random loss */
665 if (random_loss != 0) {
666 net_random_loss_init(&random_loss_node, random_loss);
667 random_loss_node.next_node = &server_node.node.super;
668 bottleneck_node.next_node = &random_loss_node.super;
669 *node_insert_at++ = &random_loss_node.super;
670 }
671
672 while (now < 1000 + length)
673 run_nodes(nodes);
674
675 return 0;
676 }
677